398 files changed, 6481 insertions, 2562 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1150444a21ab..1de51110e622 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1425,6 +1425,12 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1425 | 1425 | ||
1426 | nolapic_timer [X86-32,APIC] Do not use the local APIC timer. | 1426 | nolapic_timer [X86-32,APIC] Do not use the local APIC timer. |
1427 | 1427 | ||
1428 | nox2apic [X86-64,APIC] Do not enable x2APIC mode. | ||
1429 | |||
1430 | x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of | ||
1431 | default x2apic cluster mode on platforms | ||
1432 | supporting x2apic. | ||
1433 | |||
1428 | noltlbs [PPC] Do not use large page/tlb entries for kernel | 1434 | noltlbs [PPC] Do not use large page/tlb entries for kernel |
1429 | lowmem mapping on PPC40x. | 1435 | lowmem mapping on PPC40x. |
1430 | 1436 | ||
@@ -1882,6 +1888,12 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1882 | shapers= [NET] | 1888 | shapers= [NET] |
1883 | Maximal number of shapers. | 1889 | Maximal number of shapers. |
1884 | 1890 | ||
1891 | show_msr= [x86] show boot-time MSR settings | ||
1892 | Format: { <integer> } | ||
1893 | Show boot-time (BIOS-initialized) MSR settings. | ||
1894 | The parameter means the number of CPUs to show, | ||
1895 | for example 1 means boot CPU only. | ||
1896 | |||
1885 | sim710= [SCSI,HW] | 1897 | sim710= [SCSI,HW] |
1886 | See header of drivers/scsi/sim710.c. | 1898 | See header of drivers/scsi/sim710.c. |
1887 | 1899 | ||
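As a usage note for the three parameters documented above: they are ordinary kernel command-line options, so a (hypothetical) boot line exercising the MSR dump might look like

    vmlinuz root=/dev/sda1 show_msr=1

which prints the BIOS-initialized MSR settings of the boot CPU only; show_msr=2 would cover the first two CPUs, while nox2apic keeps the kernel in xAPIC mode and x2apic_phys selects physical rather than cluster destination mode when x2APIC is in use.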
diff --git a/arch/um/sys-x86_64/syscall_table.c b/arch/um/sys-x86_64/syscall_table.c
index c128eb897008..32f5fbe2d0d2 100644
--- a/arch/um/sys-x86_64/syscall_table.c
+++ b/arch/um/sys-x86_64/syscall_table.c
@@ -41,12 +41,12 @@ | |||
41 | #define stub_rt_sigreturn sys_rt_sigreturn | 41 | #define stub_rt_sigreturn sys_rt_sigreturn |
42 | 42 | ||
43 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | 43 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; |
44 | #undef _ASM_X86_64_UNISTD_H_ | 44 | #undef ASM_X86__UNISTD_64_H |
45 | #include <asm-x86/unistd_64.h> | 45 | #include <asm-x86/unistd_64.h> |
46 | 46 | ||
47 | #undef __SYSCALL | 47 | #undef __SYSCALL |
48 | #define __SYSCALL(nr, sym) [ nr ] = sym, | 48 | #define __SYSCALL(nr, sym) [ nr ] = sym, |
49 | #undef _ASM_X86_64_UNISTD_H_ | 49 | #undef ASM_X86__UNISTD_64_H |
50 | 50 | ||
51 | typedef void (*sys_call_ptr_t)(void); | 51 | typedef void (*sys_call_ptr_t)(void); |
52 | 52 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864d1325..21ef9dd36187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1643,6 +1643,14 @@ config DMAR_FLOPPY_WA | |||
1643 | workaround will setup a 1:1 mapping for the first | 1643 | workaround will setup a 1:1 mapping for the first |
1644 | 16M to make floppy (an ISA device) work. | 1644 | 16M to make floppy (an ISA device) work. |
1645 | 1645 | ||
1646 | config INTR_REMAP | ||
1647 | bool "Support for Interrupt Remapping (EXPERIMENTAL)" | ||
1648 | depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL | ||
1649 | help | ||
1650 | Supports Interrupt remapping for IO-APIC and MSI devices. | ||
1651 | To use x2apic mode in the CPU's which support x2APIC enhancements or | ||
1652 | to support platforms with CPU's having > 8 bit APIC ID, say Y. | ||
1653 | |||
1646 | source "drivers/pci/pcie/Kconfig" | 1654 | source "drivers/pci/pcie/Kconfig" |
1647 | 1655 | ||
1648 | source "drivers/pci/Kconfig" | 1656 | source "drivers/pci/Kconfig" |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2c518fbc52ec..6156ac25ff8c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -415,3 +415,73 @@ config X86_MINIMUM_CPU_FAMILY | |||
415 | config X86_DEBUGCTLMSR | 415 | config X86_DEBUGCTLMSR |
416 | def_bool y | 416 | def_bool y |
417 | depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) | 417 | depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) |
418 | |||
419 | menuconfig PROCESSOR_SELECT | ||
420 | default y | ||
421 | bool "Supported processor vendors" if EMBEDDED | ||
422 | help | ||
423 | This lets you choose what x86 vendor support code your kernel | ||
424 | will include. | ||
425 | |||
426 | config CPU_SUP_INTEL_32 | ||
427 | default y | ||
428 | bool "Support Intel processors" if PROCESSOR_SELECT | ||
429 | depends on !64BIT | ||
430 | help | ||
431 | This enables extended support for Intel processors | ||
432 | |||
433 | config CPU_SUP_INTEL_64 | ||
434 | default y | ||
435 | bool "Support Intel processors" if PROCESSOR_SELECT | ||
436 | depends on 64BIT | ||
437 | help | ||
438 | This enables extended support for Intel processors | ||
439 | |||
440 | config CPU_SUP_CYRIX_32 | ||
441 | default y | ||
442 | bool "Support Cyrix processors" if PROCESSOR_SELECT | ||
443 | depends on !64BIT | ||
444 | help | ||
445 | This enables extended support for Cyrix processors | ||
446 | |||
447 | config CPU_SUP_AMD_32 | ||
448 | default y | ||
449 | bool "Support AMD processors" if PROCESSOR_SELECT | ||
450 | depends on !64BIT | ||
451 | help | ||
452 | This enables extended support for AMD processors | ||
453 | |||
454 | config CPU_SUP_AMD_64 | ||
455 | default y | ||
456 | bool "Support AMD processors" if PROCESSOR_SELECT | ||
457 | depends on 64BIT | ||
458 | help | ||
459 | This enables extended support for AMD processors | ||
460 | |||
461 | config CPU_SUP_CENTAUR_32 | ||
462 | default y | ||
463 | bool "Support Centaur processors" if PROCESSOR_SELECT | ||
464 | depends on !64BIT | ||
465 | help | ||
466 | This enables extended support for Centaur processors | ||
467 | |||
468 | config CPU_SUP_CENTAUR_64 | ||
469 | default y | ||
470 | bool "Support Centaur processors" if PROCESSOR_SELECT | ||
471 | depends on 64BIT | ||
472 | help | ||
473 | This enables extended support for Centaur processors | ||
474 | |||
475 | config CPU_SUP_TRANSMETA_32 | ||
476 | default y | ||
477 | bool "Support Transmeta processors" if PROCESSOR_SELECT | ||
478 | depends on !64BIT | ||
479 | help | ||
480 | This enables extended support for Transmeta processors | ||
481 | |||
482 | config CPU_SUP_UMC_32 | ||
483 | default y | ||
484 | bool "Support UMC processors" if PROCESSOR_SELECT | ||
485 | depends on !64BIT | ||
486 | help | ||
487 | This enables extended support for UMC processors | ||
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 9fea73706479..aaf5a2131efc 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | #undef CONFIG_PARAVIRT | 17 | #undef CONFIG_PARAVIRT |
18 | #ifdef CONFIG_X86_32 | 18 | #ifdef CONFIG_X86_32 |
19 | #define _ASM_DESC_H_ 1 | 19 | #define ASM_X86__DESC_H 1 |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef CONFIG_X86_64 | 22 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 4b9ae7c56748..4d3ff037201f 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -38,12 +38,12 @@ static const u32 req_flags[NCAPINTS] = | |||
38 | { | 38 | { |
39 | REQUIRED_MASK0, | 39 | REQUIRED_MASK0, |
40 | REQUIRED_MASK1, | 40 | REQUIRED_MASK1, |
41 | REQUIRED_MASK2, | 41 | 0, /* REQUIRED_MASK2 not implemented in this file */ |
42 | REQUIRED_MASK3, | 42 | 0, /* REQUIRED_MASK3 not implemented in this file */ |
43 | REQUIRED_MASK4, | 43 | REQUIRED_MASK4, |
44 | REQUIRED_MASK5, | 44 | 0, /* REQUIRED_MASK5 not implemented in this file */ |
45 | REQUIRED_MASK6, | 45 | REQUIRED_MASK6, |
46 | REQUIRED_MASK7, | 46 | 0, /* REQUIRED_MASK7 not implemented in this file */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) | 49 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) |
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index bbe76953bae9..4589caa3e9d1 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <stdio.h> | 16 | #include <stdio.h> |
17 | 17 | ||
18 | #include "../kernel/cpu/feature_names.c" | 18 | #include "../kernel/cpu/capflags.c" |
19 | 19 | ||
20 | #if NCAPFLAGS > 8 | 20 | #if NCAPFLAGS > 8 |
21 | # error "Need to adjust the boot code handling of CPUID strings" | 21 | # error "Need to adjust the boot code handling of CPUID strings" |
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 20af4c79579a..f25a10124005 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -179,9 +179,10 @@ struct sigframe | |||
179 | u32 pretcode; | 179 | u32 pretcode; |
180 | int sig; | 180 | int sig; |
181 | struct sigcontext_ia32 sc; | 181 | struct sigcontext_ia32 sc; |
182 | struct _fpstate_ia32 fpstate; | 182 | struct _fpstate_ia32 fpstate_unused; /* look at kernel/sigframe.h */ |
183 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; | 183 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; |
184 | char retcode[8]; | 184 | char retcode[8]; |
185 | /* fp state follows here */ | ||
185 | }; | 186 | }; |
186 | 187 | ||
187 | struct rt_sigframe | 188 | struct rt_sigframe |
@@ -192,8 +193,8 @@ struct rt_sigframe | |||
192 | u32 puc; | 193 | u32 puc; |
193 | compat_siginfo_t info; | 194 | compat_siginfo_t info; |
194 | struct ucontext_ia32 uc; | 195 | struct ucontext_ia32 uc; |
195 | struct _fpstate_ia32 fpstate; | ||
196 | char retcode[8]; | 196 | char retcode[8]; |
197 | /* fp state follows here */ | ||
197 | }; | 198 | }; |
198 | 199 | ||
199 | #define COPY(x) { \ | 200 | #define COPY(x) { \ |
@@ -215,7 +216,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, | |||
215 | unsigned int *peax) | 216 | unsigned int *peax) |
216 | { | 217 | { |
217 | unsigned int tmpflags, gs, oldgs, err = 0; | 218 | unsigned int tmpflags, gs, oldgs, err = 0; |
218 | struct _fpstate_ia32 __user *buf; | 219 | void __user *buf; |
219 | u32 tmp; | 220 | u32 tmp; |
220 | 221 | ||
221 | /* Always make any pending restarted system calls return -EINTR */ | 222 | /* Always make any pending restarted system calls return -EINTR */ |
@@ -259,26 +260,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, | |||
259 | 260 | ||
260 | err |= __get_user(tmp, &sc->fpstate); | 261 | err |= __get_user(tmp, &sc->fpstate); |
261 | buf = compat_ptr(tmp); | 262 | buf = compat_ptr(tmp); |
262 | if (buf) { | 263 | err |= restore_i387_xstate_ia32(buf); |
263 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
264 | goto badframe; | ||
265 | err |= restore_i387_ia32(buf); | ||
266 | } else { | ||
267 | struct task_struct *me = current; | ||
268 | |||
269 | if (used_math()) { | ||
270 | clear_fpu(me); | ||
271 | clear_used_math(); | ||
272 | } | ||
273 | } | ||
274 | 264 | ||
275 | err |= __get_user(tmp, &sc->ax); | 265 | err |= __get_user(tmp, &sc->ax); |
276 | *peax = tmp; | 266 | *peax = tmp; |
277 | 267 | ||
278 | return err; | 268 | return err; |
279 | |||
280 | badframe: | ||
281 | return 1; | ||
282 | } | 269 | } |
283 | 270 | ||
284 | asmlinkage long sys32_sigreturn(struct pt_regs *regs) | 271 | asmlinkage long sys32_sigreturn(struct pt_regs *regs) |
@@ -350,7 +337,7 @@ badframe: | |||
350 | */ | 337 | */ |
351 | 338 | ||
352 | static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, | 339 | static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, |
353 | struct _fpstate_ia32 __user *fpstate, | 340 | void __user *fpstate, |
354 | struct pt_regs *regs, unsigned int mask) | 341 | struct pt_regs *regs, unsigned int mask) |
355 | { | 342 | { |
356 | int tmp, err = 0; | 343 | int tmp, err = 0; |
@@ -381,7 +368,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, | |||
381 | err |= __put_user((u32)regs->flags, &sc->flags); | 368 | err |= __put_user((u32)regs->flags, &sc->flags); |
382 | err |= __put_user((u32)regs->sp, &sc->sp_at_signal); | 369 | err |= __put_user((u32)regs->sp, &sc->sp_at_signal); |
383 | 370 | ||
384 | tmp = save_i387_ia32(fpstate); | 371 | tmp = save_i387_xstate_ia32(fpstate); |
385 | if (tmp < 0) | 372 | if (tmp < 0) |
386 | err = -EFAULT; | 373 | err = -EFAULT; |
387 | else { | 374 | else { |
@@ -402,7 +389,8 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, | |||
402 | * Determine which stack to use.. | 389 | * Determine which stack to use.. |
403 | */ | 390 | */ |
404 | static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 391 | static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
405 | size_t frame_size) | 392 | size_t frame_size, |
393 | void **fpstate) | ||
406 | { | 394 | { |
407 | unsigned long sp; | 395 | unsigned long sp; |
408 | 396 | ||
@@ -421,6 +409,11 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | |||
421 | ka->sa.sa_restorer) | 409 | ka->sa.sa_restorer) |
422 | sp = (unsigned long) ka->sa.sa_restorer; | 410 | sp = (unsigned long) ka->sa.sa_restorer; |
423 | 411 | ||
412 | if (used_math()) { | ||
413 | sp = sp - sig_xstate_ia32_size; | ||
414 | *fpstate = (struct _fpstate_ia32 *) sp; | ||
415 | } | ||
416 | |||
424 | sp -= frame_size; | 417 | sp -= frame_size; |
425 | /* Align the stack pointer according to the i386 ABI, | 418 | /* Align the stack pointer according to the i386 ABI, |
426 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ | 419 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ |
@@ -434,6 +427,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
434 | struct sigframe __user *frame; | 427 | struct sigframe __user *frame; |
435 | void __user *restorer; | 428 | void __user *restorer; |
436 | int err = 0; | 429 | int err = 0; |
430 | void __user *fpstate = NULL; | ||
437 | 431 | ||
438 | /* copy_to_user optimizes that into a single 8 byte store */ | 432 | /* copy_to_user optimizes that into a single 8 byte store */ |
439 | static const struct { | 433 | static const struct { |
@@ -448,7 +442,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
448 | 0, | 442 | 0, |
449 | }; | 443 | }; |
450 | 444 | ||
451 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 445 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
452 | 446 | ||
453 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 447 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
454 | goto give_sigsegv; | 448 | goto give_sigsegv; |
@@ -457,8 +451,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, | |||
457 | if (err) | 451 | if (err) |
458 | goto give_sigsegv; | 452 | goto give_sigsegv; |
459 | 453 | ||
460 | err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs, | 454 | err |= ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]); |
461 | set->sig[0]); | ||
462 | if (err) | 455 | if (err) |
463 | goto give_sigsegv; | 456 | goto give_sigsegv; |
464 | 457 | ||
@@ -522,6 +515,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
522 | struct rt_sigframe __user *frame; | 515 | struct rt_sigframe __user *frame; |
523 | void __user *restorer; | 516 | void __user *restorer; |
524 | int err = 0; | 517 | int err = 0; |
518 | void __user *fpstate = NULL; | ||
525 | 519 | ||
526 | /* __copy_to_user optimizes that into a single 8 byte store */ | 520 | /* __copy_to_user optimizes that into a single 8 byte store */ |
527 | static const struct { | 521 | static const struct { |
@@ -537,7 +531,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
537 | 0, | 531 | 0, |
538 | }; | 532 | }; |
539 | 533 | ||
540 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 534 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
541 | 535 | ||
542 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 536 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
543 | goto give_sigsegv; | 537 | goto give_sigsegv; |
@@ -550,13 +544,16 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
550 | goto give_sigsegv; | 544 | goto give_sigsegv; |
551 | 545 | ||
552 | /* Create the ucontext. */ | 546 | /* Create the ucontext. */ |
553 | err |= __put_user(0, &frame->uc.uc_flags); | 547 | if (cpu_has_xsave) |
548 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
549 | else | ||
550 | err |= __put_user(0, &frame->uc.uc_flags); | ||
554 | err |= __put_user(0, &frame->uc.uc_link); | 551 | err |= __put_user(0, &frame->uc.uc_link); |
555 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 552 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
556 | err |= __put_user(sas_ss_flags(regs->sp), | 553 | err |= __put_user(sas_ss_flags(regs->sp), |
557 | &frame->uc.uc_stack.ss_flags); | 554 | &frame->uc.uc_stack.ss_flags); |
558 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 555 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
559 | err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, | 556 | err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
560 | regs, set->sig[0]); | 557 | regs, set->sig[0]); |
561 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 558 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
562 | if (err) | 559 | if (err) |
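The net effect of the ia32_signal.c changes is easiest to see as a stack layout. A rough sketch of what get_sigframe() now builds, based only on the hunks above (alignment and padding are handled by the code itself):

    /*
     * User stack, growing down from the original sp:
     *
     *   [ xsave/fpstate area  ]   sig_xstate_ia32_size bytes, returned via
     *                             *fpstate and recorded in sc->fpstate
     *   [ struct (rt_)sigframe ]  frame_size bytes, ABI-aligned
     *
     * The fixed "struct _fpstate_ia32 fpstate" member is gone from both
     * frame structures; only the pointer saved in the sigcontext links the
     * two areas, which is what allows a variable, XSAVE-sized save area.
     */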
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3db651fc8ec5..c9be69fedb70 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -38,7 +38,7 @@ obj-y += tsc.o io_delay.o rtc.o | |||
38 | 38 | ||
39 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o | 39 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o |
40 | obj-y += process.o | 40 | obj-y += process.o |
41 | obj-y += i387.o | 41 | obj-y += i387.o xsave.o |
42 | obj-y += ptrace.o | 42 | obj-y += ptrace.o |
43 | obj-y += ds.o | 43 | obj-y += ds.o |
44 | obj-$(CONFIG_X86_32) += tls.o | 44 | obj-$(CONFIG_X86_32) += tls.o |
@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | |||
69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
71 | obj-$(CONFIG_X86_NUMAQ) += numaq_32.o | 71 | obj-$(CONFIG_X86_NUMAQ) += numaq_32.o |
72 | obj-$(CONFIG_X86_ES7000) += es7000_32.o | ||
72 | obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o | 73 | obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o |
73 | obj-y += vsmp_64.o | 74 | obj-y += vsmp_64.o |
74 | obj-$(CONFIG_KPROBES) += kprobes.o | 75 | obj-$(CONFIG_KPROBES) += kprobes.o |
@@ -104,6 +105,8 @@ obj-$(CONFIG_OLPC) += olpc.o | |||
104 | ifeq ($(CONFIG_X86_64),y) | 105 | ifeq ($(CONFIG_X86_64),y) |
105 | obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o | 106 | obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o |
106 | obj-y += bios_uv.o | 107 | obj-y += bios_uv.o |
108 | obj-y += genx2apic_cluster.o | ||
109 | obj-y += genx2apic_phys.o | ||
107 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o | 110 | obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o |
108 | obj-$(CONFIG_AUDIT) += audit_64.o | 111 | obj-$(CONFIG_AUDIT) += audit_64.o |
109 | 112 | ||
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index bfd10fd211cd..27ef365e757d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -775,7 +775,7 @@ static void __init acpi_register_lapic_address(unsigned long address) | |||
775 | 775 | ||
776 | set_fixmap_nocache(FIX_APIC_BASE, address); | 776 | set_fixmap_nocache(FIX_APIC_BASE, address); |
777 | if (boot_cpu_physical_apicid == -1U) { | 777 | if (boot_cpu_physical_apicid == -1U) { |
778 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 778 | boot_cpu_physical_apicid = read_apic_id(); |
779 | #ifdef CONFIG_X86_32 | 779 | #ifdef CONFIG_X86_32 |
780 | apic_version[boot_cpu_physical_apicid] = | 780 | apic_version[boot_cpu_physical_apicid] = |
781 | GET_APIC_VERSION(apic_read(APIC_LVR)); | 781 | GET_APIC_VERSION(apic_read(APIC_LVR)); |
@@ -1351,7 +1351,9 @@ static void __init acpi_process_madt(void) | |||
1351 | acpi_ioapic = 1; | 1351 | acpi_ioapic = 1; |
1352 | 1352 | ||
1353 | smp_found_config = 1; | 1353 | smp_found_config = 1; |
1354 | #ifdef CONFIG_X86_32 | ||
1354 | setup_apic_routing(); | 1355 | setup_apic_routing(); |
1356 | #endif | ||
1355 | } | 1357 | } |
1356 | } | 1358 | } |
1357 | if (error == -EINVAL) { | 1359 | if (error == -EINVAL) { |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 2763cb37b553..65a0c1b48696 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | |||
145 | extern char __vsyscall_0; | 145 | extern char __vsyscall_0; |
146 | const unsigned char *const *find_nop_table(void) | 146 | const unsigned char *const *find_nop_table(void) |
147 | { | 147 | { |
148 | return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || | 148 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
149 | boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; | 149 | boot_cpu_has(X86_FEATURE_NOPL)) |
150 | return p6_nops; | ||
151 | else | ||
152 | return k8_nops; | ||
150 | } | 153 | } |
151 | 154 | ||
152 | #else /* CONFIG_X86_64 */ | 155 | #else /* CONFIG_X86_64 */ |
153 | 156 | ||
154 | static const struct nop { | ||
155 | int cpuid; | ||
156 | const unsigned char *const *noptable; | ||
157 | } noptypes[] = { | ||
158 | { X86_FEATURE_K8, k8_nops }, | ||
159 | { X86_FEATURE_K7, k7_nops }, | ||
160 | { X86_FEATURE_P4, p6_nops }, | ||
161 | { X86_FEATURE_P3, p6_nops }, | ||
162 | { -1, NULL } | ||
163 | }; | ||
164 | |||
165 | const unsigned char *const *find_nop_table(void) | 157 | const unsigned char *const *find_nop_table(void) |
166 | { | 158 | { |
167 | const unsigned char *const *noptable = intel_nops; | 159 | if (boot_cpu_has(X86_FEATURE_K8)) |
168 | int i; | 160 | return k8_nops; |
169 | 161 | else if (boot_cpu_has(X86_FEATURE_K7)) | |
170 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | 162 | return k7_nops; |
171 | if (boot_cpu_has(noptypes[i].cpuid)) { | 163 | else if (boot_cpu_has(X86_FEATURE_NOPL)) |
172 | noptable = noptypes[i].noptable; | 164 | return p6_nops; |
173 | break; | 165 | else |
174 | } | 166 | return intel_nops; |
175 | } | ||
176 | return noptable; | ||
177 | } | 167 | } |
178 | 168 | ||
179 | #endif /* CONFIG_X86_64 */ | 169 | #endif /* CONFIG_X86_64 */ |
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index f88bd0d982b0..584272105051 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -145,13 +145,18 @@ static int modern_apic(void) | |||
145 | return lapic_get_version() >= 0x14; | 145 | return lapic_get_version() >= 0x14; |
146 | } | 146 | } |
147 | 147 | ||
148 | void apic_wait_icr_idle(void) | 148 | /* |
149 | * Paravirt kernels also might be using these below ops. So we still | ||
150 | * use generic apic_read()/apic_write(), which might be pointing to different | ||
151 | * ops in PARAVIRT case. | ||
152 | */ | ||
153 | void xapic_wait_icr_idle(void) | ||
149 | { | 154 | { |
150 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) | 155 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) |
151 | cpu_relax(); | 156 | cpu_relax(); |
152 | } | 157 | } |
153 | 158 | ||
154 | u32 safe_apic_wait_icr_idle(void) | 159 | u32 safe_xapic_wait_icr_idle(void) |
155 | { | 160 | { |
156 | u32 send_status; | 161 | u32 send_status; |
157 | int timeout; | 162 | int timeout; |
@@ -167,16 +172,48 @@ u32 safe_apic_wait_icr_idle(void) | |||
167 | return send_status; | 172 | return send_status; |
168 | } | 173 | } |
169 | 174 | ||
175 | void xapic_icr_write(u32 low, u32 id) | ||
176 | { | ||
177 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); | ||
178 | apic_write(APIC_ICR, low); | ||
179 | } | ||
180 | |||
181 | u64 xapic_icr_read(void) | ||
182 | { | ||
183 | u32 icr1, icr2; | ||
184 | |||
185 | icr2 = apic_read(APIC_ICR2); | ||
186 | icr1 = apic_read(APIC_ICR); | ||
187 | |||
188 | return icr1 | ((u64)icr2 << 32); | ||
189 | } | ||
190 | |||
191 | static struct apic_ops xapic_ops = { | ||
192 | .read = native_apic_mem_read, | ||
193 | .write = native_apic_mem_write, | ||
194 | .icr_read = xapic_icr_read, | ||
195 | .icr_write = xapic_icr_write, | ||
196 | .wait_icr_idle = xapic_wait_icr_idle, | ||
197 | .safe_wait_icr_idle = safe_xapic_wait_icr_idle, | ||
198 | }; | ||
199 | |||
200 | struct apic_ops __read_mostly *apic_ops = &xapic_ops; | ||
201 | EXPORT_SYMBOL_GPL(apic_ops); | ||
202 | |||
170 | /** | 203 | /** |
171 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | 204 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 |
172 | */ | 205 | */ |
173 | void __cpuinit enable_NMI_through_LVT0(void) | 206 | void __cpuinit enable_NMI_through_LVT0(void) |
174 | { | 207 | { |
175 | unsigned int v = APIC_DM_NMI; | 208 | unsigned int v; |
176 | 209 | ||
177 | /* Level triggered for 82489DX */ | 210 | /* unmask and set to NMI */ |
211 | v = APIC_DM_NMI; | ||
212 | |||
213 | /* Level triggered for 82489DX (32bit mode) */ | ||
178 | if (!lapic_is_integrated()) | 214 | if (!lapic_is_integrated()) |
179 | v |= APIC_LVT_LEVEL_TRIGGER; | 215 | v |= APIC_LVT_LEVEL_TRIGGER; |
216 | |||
180 | apic_write(APIC_LVT0, v); | 217 | apic_write(APIC_LVT0, v); |
181 | } | 218 | } |
182 | 219 | ||
@@ -193,9 +230,13 @@ int get_physical_broadcast(void) | |||
193 | */ | 230 | */ |
194 | int lapic_get_maxlvt(void) | 231 | int lapic_get_maxlvt(void) |
195 | { | 232 | { |
196 | unsigned int v = apic_read(APIC_LVR); | 233 | unsigned int v; |
197 | 234 | ||
198 | /* 82489DXs do not report # of LVT entries. */ | 235 | v = apic_read(APIC_LVR); |
236 | /* | ||
237 | * - we always have APIC integrated on 64bit mode | ||
238 | * - 82489DXs do not report # of LVT entries | ||
239 | */ | ||
199 | return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; | 240 | return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; |
200 | } | 241 | } |
201 | 242 | ||
@@ -1205,7 +1246,7 @@ void __init init_apic_mappings(void) | |||
1205 | * default configuration (or the MP table is broken). | 1246 | * default configuration (or the MP table is broken). |
1206 | */ | 1247 | */ |
1207 | if (boot_cpu_physical_apicid == -1U) | 1248 | if (boot_cpu_physical_apicid == -1U) |
1208 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 1249 | boot_cpu_physical_apicid = read_apic_id(); |
1209 | 1250 | ||
1210 | } | 1251 | } |
1211 | 1252 | ||
@@ -1242,7 +1283,7 @@ int __init APIC_init_uniprocessor(void) | |||
1242 | * might be zero if read from MP tables. Get it from LAPIC. | 1283 | * might be zero if read from MP tables. Get it from LAPIC. |
1243 | */ | 1284 | */ |
1244 | #ifdef CONFIG_CRASH_DUMP | 1285 | #ifdef CONFIG_CRASH_DUMP |
1245 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 1286 | boot_cpu_physical_apicid = read_apic_id(); |
1246 | #endif | 1287 | #endif |
1247 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); | 1288 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
1248 | 1289 | ||
@@ -1321,54 +1362,6 @@ void smp_error_interrupt(struct pt_regs *regs) | |||
1321 | irq_exit(); | 1362 | irq_exit(); |
1322 | } | 1363 | } |
1323 | 1364 | ||
1324 | #ifdef CONFIG_SMP | ||
1325 | void __init smp_intr_init(void) | ||
1326 | { | ||
1327 | /* | ||
1328 | * IRQ0 must be given a fixed assignment and initialized, | ||
1329 | * because it's used before the IO-APIC is set up. | ||
1330 | */ | ||
1331 | set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); | ||
1332 | |||
1333 | /* | ||
1334 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | ||
1335 | * IPI, driven by wakeup. | ||
1336 | */ | ||
1337 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | ||
1338 | |||
1339 | /* IPI for invalidation */ | ||
1340 | alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); | ||
1341 | |||
1342 | /* IPI for generic function call */ | ||
1343 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | ||
1344 | |||
1345 | /* IPI for single call function */ | ||
1346 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
1347 | call_function_single_interrupt); | ||
1348 | } | ||
1349 | #endif | ||
1350 | |||
1351 | /* | ||
1352 | * Initialize APIC interrupts | ||
1353 | */ | ||
1354 | void __init apic_intr_init(void) | ||
1355 | { | ||
1356 | #ifdef CONFIG_SMP | ||
1357 | smp_intr_init(); | ||
1358 | #endif | ||
1359 | /* self generated IPI for local APIC timer */ | ||
1360 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | ||
1361 | |||
1362 | /* IPI vectors for APIC spurious and error interrupts */ | ||
1363 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | ||
1364 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | ||
1365 | |||
1366 | /* thermal monitor LVT interrupt */ | ||
1367 | #ifdef CONFIG_X86_MCE_P4THERMAL | ||
1368 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | ||
1369 | #endif | ||
1370 | } | ||
1371 | |||
1372 | /** | 1365 | /** |
1373 | * connect_bsp_APIC - attach the APIC to the interrupt system | 1366 | * connect_bsp_APIC - attach the APIC to the interrupt system |
1374 | */ | 1367 | */ |
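The apic_ops table added above only pays off through thin wrappers that the rest of the kernel keeps calling. Those wrappers are not part of this file; presumably they live in the apic header and look roughly like the following sketch:

    /* Sketch only -- assumed shape of the generic wrappers, not shown in this diff. */
    static inline u32 apic_read(u32 reg)
    {
            return apic_ops->read(reg);
    }

    static inline void apic_write(u32 reg, u32 val)
    {
            apic_ops->write(reg, val);
    }

    static inline void apic_wait_icr_idle(void)
    {
            apic_ops->wait_icr_idle();
    }

Existing callers stay unchanged, paravirt can still interpose on apic_read()/apic_write() as the comment above notes, and the 64-bit code can swap in an MSR-based x2APIC implementation at runtime.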
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 446c062e831c..1a6011855af3 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/clockchips.h> | 27 | #include <linux/clockchips.h> |
28 | #include <linux/acpi_pmtmr.h> | 28 | #include <linux/acpi_pmtmr.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/dmar.h> | ||
30 | 31 | ||
31 | #include <asm/atomic.h> | 32 | #include <asm/atomic.h> |
32 | #include <asm/smp.h> | 33 | #include <asm/smp.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <asm/proto.h> | 40 | #include <asm/proto.h> |
40 | #include <asm/timex.h> | 41 | #include <asm/timex.h> |
41 | #include <asm/apic.h> | 42 | #include <asm/apic.h> |
43 | #include <asm/i8259.h> | ||
42 | 44 | ||
43 | #include <mach_ipi.h> | 45 | #include <mach_ipi.h> |
44 | #include <mach_apic.h> | 46 | #include <mach_apic.h> |
@@ -46,6 +48,11 @@ | |||
46 | static int disable_apic_timer __cpuinitdata; | 48 | static int disable_apic_timer __cpuinitdata; |
47 | static int apic_calibrate_pmtmr __initdata; | 49 | static int apic_calibrate_pmtmr __initdata; |
48 | int disable_apic; | 50 | int disable_apic; |
51 | int disable_x2apic; | ||
52 | int x2apic; | ||
53 | |||
54 | /* x2apic enabled before OS handover */ | ||
55 | int x2apic_preenabled; | ||
49 | 56 | ||
50 | /* Local APIC timer works in C2 */ | 57 | /* Local APIC timer works in C2 */ |
51 | int local_apic_timer_c2_ok; | 58 | int local_apic_timer_c2_ok; |
@@ -118,13 +125,13 @@ static int modern_apic(void) | |||
118 | return lapic_get_version() >= 0x14; | 125 | return lapic_get_version() >= 0x14; |
119 | } | 126 | } |
120 | 127 | ||
121 | void apic_wait_icr_idle(void) | 128 | void xapic_wait_icr_idle(void) |
122 | { | 129 | { |
123 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) | 130 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) |
124 | cpu_relax(); | 131 | cpu_relax(); |
125 | } | 132 | } |
126 | 133 | ||
127 | u32 safe_apic_wait_icr_idle(void) | 134 | u32 safe_xapic_wait_icr_idle(void) |
128 | { | 135 | { |
129 | u32 send_status; | 136 | u32 send_status; |
130 | int timeout; | 137 | int timeout; |
@@ -140,6 +147,69 @@ u32 safe_apic_wait_icr_idle(void) | |||
140 | return send_status; | 147 | return send_status; |
141 | } | 148 | } |
142 | 149 | ||
150 | void xapic_icr_write(u32 low, u32 id) | ||
151 | { | ||
152 | apic_write(APIC_ICR2, id << 24); | ||
153 | apic_write(APIC_ICR, low); | ||
154 | } | ||
155 | |||
156 | u64 xapic_icr_read(void) | ||
157 | { | ||
158 | u32 icr1, icr2; | ||
159 | |||
160 | icr2 = apic_read(APIC_ICR2); | ||
161 | icr1 = apic_read(APIC_ICR); | ||
162 | |||
163 | return (icr1 | ((u64)icr2 << 32)); | ||
164 | } | ||
165 | |||
166 | static struct apic_ops xapic_ops = { | ||
167 | .read = native_apic_mem_read, | ||
168 | .write = native_apic_mem_write, | ||
169 | .icr_read = xapic_icr_read, | ||
170 | .icr_write = xapic_icr_write, | ||
171 | .wait_icr_idle = xapic_wait_icr_idle, | ||
172 | .safe_wait_icr_idle = safe_xapic_wait_icr_idle, | ||
173 | }; | ||
174 | |||
175 | struct apic_ops __read_mostly *apic_ops = &xapic_ops; | ||
176 | |||
177 | EXPORT_SYMBOL_GPL(apic_ops); | ||
178 | |||
179 | static void x2apic_wait_icr_idle(void) | ||
180 | { | ||
181 | /* no need to wait for icr idle in x2apic */ | ||
182 | return; | ||
183 | } | ||
184 | |||
185 | static u32 safe_x2apic_wait_icr_idle(void) | ||
186 | { | ||
187 | /* no need to wait for icr idle in x2apic */ | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | void x2apic_icr_write(u32 low, u32 id) | ||
192 | { | ||
193 | wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); | ||
194 | } | ||
195 | |||
196 | u64 x2apic_icr_read(void) | ||
197 | { | ||
198 | unsigned long val; | ||
199 | |||
200 | rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); | ||
201 | return val; | ||
202 | } | ||
203 | |||
204 | static struct apic_ops x2apic_ops = { | ||
205 | .read = native_apic_msr_read, | ||
206 | .write = native_apic_msr_write, | ||
207 | .icr_read = x2apic_icr_read, | ||
208 | .icr_write = x2apic_icr_write, | ||
209 | .wait_icr_idle = x2apic_wait_icr_idle, | ||
210 | .safe_wait_icr_idle = safe_x2apic_wait_icr_idle, | ||
211 | }; | ||
212 | |||
143 | /** | 213 | /** |
144 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | 214 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 |
145 | */ | 215 | */ |
@@ -149,6 +219,11 @@ void __cpuinit enable_NMI_through_LVT0(void) | |||
149 | 219 | ||
150 | /* unmask and set to NMI */ | 220 | /* unmask and set to NMI */ |
151 | v = APIC_DM_NMI; | 221 | v = APIC_DM_NMI; |
222 | |||
223 | /* Level triggered for 82489DX (32bit mode) */ | ||
224 | if (!lapic_is_integrated()) | ||
225 | v |= APIC_LVT_LEVEL_TRIGGER; | ||
226 | |||
152 | apic_write(APIC_LVT0, v); | 227 | apic_write(APIC_LVT0, v); |
153 | } | 228 | } |
154 | 229 | ||
@@ -157,11 +232,14 @@ void __cpuinit enable_NMI_through_LVT0(void) | |||
157 | */ | 232 | */ |
158 | int lapic_get_maxlvt(void) | 233 | int lapic_get_maxlvt(void) |
159 | { | 234 | { |
160 | unsigned int v, maxlvt; | 235 | unsigned int v; |
161 | 236 | ||
162 | v = apic_read(APIC_LVR); | 237 | v = apic_read(APIC_LVR); |
163 | maxlvt = GET_APIC_MAXLVT(v); | 238 | /* |
164 | return maxlvt; | 239 | * - we always have APIC integrated on 64bit mode |
240 | * - 82489DXs do not report # of LVT entries | ||
241 | */ | ||
242 | return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2; | ||
165 | } | 243 | } |
166 | 244 | ||
167 | /* | 245 | /* |
@@ -629,10 +707,10 @@ int __init verify_local_APIC(void) | |||
629 | /* | 707 | /* |
630 | * The ID register is read/write in a real APIC. | 708 | * The ID register is read/write in a real APIC. |
631 | */ | 709 | */ |
632 | reg0 = read_apic_id(); | 710 | reg0 = apic_read(APIC_ID); |
633 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); | 711 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0); |
634 | apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); | 712 | apic_write(APIC_ID, reg0 ^ APIC_ID_MASK); |
635 | reg1 = read_apic_id(); | 713 | reg1 = apic_read(APIC_ID); |
636 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); | 714 | apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1); |
637 | apic_write(APIC_ID, reg0); | 715 | apic_write(APIC_ID, reg0); |
638 | if (reg1 != (reg0 ^ APIC_ID_MASK)) | 716 | if (reg1 != (reg0 ^ APIC_ID_MASK)) |
@@ -833,6 +911,125 @@ void __cpuinit end_local_APIC_setup(void) | |||
833 | apic_pm_activate(); | 911 | apic_pm_activate(); |
834 | } | 912 | } |
835 | 913 | ||
914 | void check_x2apic(void) | ||
915 | { | ||
916 | int msr, msr2; | ||
917 | |||
918 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | ||
919 | |||
920 | if (msr & X2APIC_ENABLE) { | ||
921 | printk("x2apic enabled by BIOS, switching to x2apic ops\n"); | ||
922 | x2apic_preenabled = x2apic = 1; | ||
923 | apic_ops = &x2apic_ops; | ||
924 | } | ||
925 | } | ||
926 | |||
927 | void enable_x2apic(void) | ||
928 | { | ||
929 | int msr, msr2; | ||
930 | |||
931 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | ||
932 | if (!(msr & X2APIC_ENABLE)) { | ||
933 | printk("Enabling x2apic\n"); | ||
934 | wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); | ||
935 | } | ||
936 | } | ||
937 | |||
938 | void enable_IR_x2apic(void) | ||
939 | { | ||
940 | #ifdef CONFIG_INTR_REMAP | ||
941 | int ret; | ||
942 | unsigned long flags; | ||
943 | |||
944 | if (!cpu_has_x2apic) | ||
945 | return; | ||
946 | |||
947 | if (!x2apic_preenabled && disable_x2apic) { | ||
948 | printk(KERN_INFO | ||
949 | "Skipped enabling x2apic and Interrupt-remapping " | ||
950 | "because of nox2apic\n"); | ||
951 | return; | ||
952 | } | ||
953 | |||
954 | if (x2apic_preenabled && disable_x2apic) | ||
955 | panic("Bios already enabled x2apic, can't enforce nox2apic"); | ||
956 | |||
957 | if (!x2apic_preenabled && skip_ioapic_setup) { | ||
958 | printk(KERN_INFO | ||
959 | "Skipped enabling x2apic and Interrupt-remapping " | ||
960 | "because of skipping io-apic setup\n"); | ||
961 | return; | ||
962 | } | ||
963 | |||
964 | ret = dmar_table_init(); | ||
965 | if (ret) { | ||
966 | printk(KERN_INFO | ||
967 | "dmar_table_init() failed with %d:\n", ret); | ||
968 | |||
969 | if (x2apic_preenabled) | ||
970 | panic("x2apic enabled by bios. But IR enabling failed"); | ||
971 | else | ||
972 | printk(KERN_INFO | ||
973 | "Not enabling x2apic,Intr-remapping\n"); | ||
974 | return; | ||
975 | } | ||
976 | |||
977 | local_irq_save(flags); | ||
978 | mask_8259A(); | ||
979 | save_mask_IO_APIC_setup(); | ||
980 | |||
981 | ret = enable_intr_remapping(1); | ||
982 | |||
983 | if (ret && x2apic_preenabled) { | ||
984 | local_irq_restore(flags); | ||
985 | panic("x2apic enabled by bios. But IR enabling failed"); | ||
986 | } | ||
987 | |||
988 | if (ret) | ||
989 | goto end; | ||
990 | |||
991 | if (!x2apic) { | ||
992 | x2apic = 1; | ||
993 | apic_ops = &x2apic_ops; | ||
994 | enable_x2apic(); | ||
995 | } | ||
996 | end: | ||
997 | if (ret) | ||
998 | /* | ||
999 | * IR enabling failed | ||
1000 | */ | ||
1001 | restore_IO_APIC_setup(); | ||
1002 | else | ||
1003 | reinit_intr_remapped_IO_APIC(x2apic_preenabled); | ||
1004 | |||
1005 | unmask_8259A(); | ||
1006 | local_irq_restore(flags); | ||
1007 | |||
1008 | if (!ret) { | ||
1009 | if (!x2apic_preenabled) | ||
1010 | printk(KERN_INFO | ||
1011 | "Enabled x2apic and interrupt-remapping\n"); | ||
1012 | else | ||
1013 | printk(KERN_INFO | ||
1014 | "Enabled Interrupt-remapping\n"); | ||
1015 | } else | ||
1016 | printk(KERN_ERR | ||
1017 | "Failed to enable Interrupt-remapping and x2apic\n"); | ||
1018 | #else | ||
1019 | if (!cpu_has_x2apic) | ||
1020 | return; | ||
1021 | |||
1022 | if (x2apic_preenabled) | ||
1023 | panic("x2apic enabled prior OS handover," | ||
1024 | " enable CONFIG_INTR_REMAP"); | ||
1025 | |||
1026 | printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping " | ||
1027 | " and x2apic\n"); | ||
1028 | #endif | ||
1029 | |||
1030 | return; | ||
1031 | } | ||
1032 | |||
836 | /* | 1033 | /* |
837 | * Detect and enable local APICs on non-SMP boards. | 1034 | * Detect and enable local APICs on non-SMP boards. |
838 | * Original code written by Keir Fraser. | 1035 | * Original code written by Keir Fraser. |
@@ -872,7 +1069,7 @@ void __init early_init_lapic_mapping(void) | |||
872 | * Fetch the APIC ID of the BSP in case we have a | 1069 | * Fetch the APIC ID of the BSP in case we have a |
873 | * default configuration (or the MP table is broken). | 1070 | * default configuration (or the MP table is broken). |
874 | */ | 1071 | */ |
875 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 1072 | boot_cpu_physical_apicid = read_apic_id(); |
876 | } | 1073 | } |
877 | 1074 | ||
878 | /** | 1075 | /** |
@@ -880,6 +1077,11 @@ void __init early_init_lapic_mapping(void) | |||
880 | */ | 1077 | */ |
881 | void __init init_apic_mappings(void) | 1078 | void __init init_apic_mappings(void) |
882 | { | 1079 | { |
1080 | if (x2apic) { | ||
1081 | boot_cpu_physical_apicid = read_apic_id(); | ||
1082 | return; | ||
1083 | } | ||
1084 | |||
883 | /* | 1085 | /* |
884 | * If no local APIC can be found then set up a fake all | 1086 | * If no local APIC can be found then set up a fake all |
885 | * zeroes page to simulate the local APIC and another | 1087 | * zeroes page to simulate the local APIC and another |
@@ -899,7 +1101,7 @@ void __init init_apic_mappings(void) | |||
899 | * Fetch the APIC ID of the BSP in case we have a | 1101 | * Fetch the APIC ID of the BSP in case we have a |
900 | * default configuration (or the MP table is broken). | 1102 | * default configuration (or the MP table is broken). |
901 | */ | 1103 | */ |
902 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 1104 | boot_cpu_physical_apicid = read_apic_id(); |
903 | } | 1105 | } |
904 | 1106 | ||
905 | /* | 1107 | /* |
@@ -918,6 +1120,9 @@ int __init APIC_init_uniprocessor(void) | |||
918 | return -1; | 1120 | return -1; |
919 | } | 1121 | } |
920 | 1122 | ||
1123 | enable_IR_x2apic(); | ||
1124 | setup_apic_routing(); | ||
1125 | |||
921 | verify_local_APIC(); | 1126 | verify_local_APIC(); |
922 | 1127 | ||
923 | connect_bsp_APIC(); | 1128 | connect_bsp_APIC(); |
@@ -1093,6 +1298,11 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1093 | cpu_set(cpu, cpu_present_map); | 1298 | cpu_set(cpu, cpu_present_map); |
1094 | } | 1299 | } |
1095 | 1300 | ||
1301 | int hard_smp_processor_id(void) | ||
1302 | { | ||
1303 | return read_apic_id(); | ||
1304 | } | ||
1305 | |||
1096 | /* | 1306 | /* |
1097 | * Power management | 1307 | * Power management |
1098 | */ | 1308 | */ |
@@ -1129,7 +1339,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) | |||
1129 | 1339 | ||
1130 | maxlvt = lapic_get_maxlvt(); | 1340 | maxlvt = lapic_get_maxlvt(); |
1131 | 1341 | ||
1132 | apic_pm_state.apic_id = read_apic_id(); | 1342 | apic_pm_state.apic_id = apic_read(APIC_ID); |
1133 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); | 1343 | apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI); |
1134 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); | 1344 | apic_pm_state.apic_ldr = apic_read(APIC_LDR); |
1135 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); | 1345 | apic_pm_state.apic_dfr = apic_read(APIC_DFR); |
@@ -1164,10 +1374,14 @@ static int lapic_resume(struct sys_device *dev) | |||
1164 | maxlvt = lapic_get_maxlvt(); | 1374 | maxlvt = lapic_get_maxlvt(); |
1165 | 1375 | ||
1166 | local_irq_save(flags); | 1376 | local_irq_save(flags); |
1167 | rdmsr(MSR_IA32_APICBASE, l, h); | 1377 | if (!x2apic) { |
1168 | l &= ~MSR_IA32_APICBASE_BASE; | 1378 | rdmsr(MSR_IA32_APICBASE, l, h); |
1169 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; | 1379 | l &= ~MSR_IA32_APICBASE_BASE; |
1170 | wrmsr(MSR_IA32_APICBASE, l, h); | 1380 | l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; |
1381 | wrmsr(MSR_IA32_APICBASE, l, h); | ||
1382 | } else | ||
1383 | enable_x2apic(); | ||
1384 | |||
1171 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); | 1385 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); |
1172 | apic_write(APIC_ID, apic_pm_state.apic_id); | 1386 | apic_write(APIC_ID, apic_pm_state.apic_id); |
1173 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); | 1387 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); |
@@ -1307,6 +1521,15 @@ __cpuinit int apic_is_clustered_box(void) | |||
1307 | return (clusters > 2); | 1521 | return (clusters > 2); |
1308 | } | 1522 | } |
1309 | 1523 | ||
1524 | static __init int setup_nox2apic(char *str) | ||
1525 | { | ||
1526 | disable_x2apic = 1; | ||
1527 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC); | ||
1528 | return 0; | ||
1529 | } | ||
1530 | early_param("nox2apic", setup_nox2apic); | ||
1531 | |||
1532 | |||
1310 | /* | 1533 | /* |
1311 | * APIC command line parameters | 1534 | * APIC command line parameters |
1312 | */ | 1535 | */ |
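As a concrete check of the x2apic_icr_write()/x2apic_icr_read() paths added above, assuming the usual constants APIC_BASE_MSR = 0x800 and APIC_ICR = 0x300 (neither value appears in this diff):

    /* 0x800 + (0x300 >> 4) == 0x830, the x2APIC ICR MSR.                 */
    /* A single 64-bit write carries the destination id in bits 63:32 and */
    /* the command word in bits 31:0, replacing the ICR2-then-ICR pair of */
    /* MMIO writes used on the xAPIC side; the x2APIC ops accordingly     */
    /* turn wait_icr_idle() into a no-op.                                 */
    wrmsrl(0x800 + (0x300 >> 4), ((u64)id << 32) | low);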
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index aa89387006fe..505543a75a56 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | #define __NO_STUBS 1 | 23 | #define __NO_STUBS 1 |
24 | #undef __SYSCALL | 24 | #undef __SYSCALL |
25 | #undef _ASM_X86_64_UNISTD_H_ | 25 | #undef ASM_X86__UNISTD_64_H |
26 | #define __SYSCALL(nr, sym) [nr] = 1, | 26 | #define __SYSCALL(nr, sym) [nr] = 1, |
27 | static char syscalls[] = { | 27 | static char syscalls[] = { |
28 | #include <asm/unistd.h> | 28 | #include <asm/unistd.h> |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee76eaad3001..403e689df0b8 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,22 +3,32 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := intel_cacheinfo.o addon_cpuid_features.o | 5 | obj-y := intel_cacheinfo.o addon_cpuid_features.o |
6 | obj-y += proc.o feature_names.o | 6 | obj-y += proc.o capflags.o powerflags.o |
7 | 7 | ||
8 | obj-$(CONFIG_X86_32) += common.o bugs.o | 8 | obj-$(CONFIG_X86_32) += common.o bugs.o cmpxchg.o |
9 | obj-$(CONFIG_X86_64) += common_64.o bugs_64.o | 9 | obj-$(CONFIG_X86_64) += common_64.o bugs_64.o |
10 | obj-$(CONFIG_X86_32) += amd.o | 10 | |
11 | obj-$(CONFIG_X86_64) += amd_64.o | 11 | obj-$(CONFIG_CPU_SUP_INTEL_32) += intel.o |
12 | obj-$(CONFIG_X86_32) += cyrix.o | 12 | obj-$(CONFIG_CPU_SUP_INTEL_64) += intel_64.o |
13 | obj-$(CONFIG_X86_32) += centaur.o | 13 | obj-$(CONFIG_CPU_SUP_AMD_32) += amd.o |
14 | obj-$(CONFIG_X86_64) += centaur_64.o | 14 | obj-$(CONFIG_CPU_SUP_AMD_64) += amd_64.o |
15 | obj-$(CONFIG_X86_32) += transmeta.o | 15 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o |
16 | obj-$(CONFIG_X86_32) += intel.o | 16 | obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o |
17 | obj-$(CONFIG_X86_64) += intel_64.o | 17 | obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o |
18 | obj-$(CONFIG_X86_32) += umc.o | 18 | obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o |
19 | obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o | ||
19 | 20 | ||
20 | obj-$(CONFIG_X86_MCE) += mcheck/ | 21 | obj-$(CONFIG_X86_MCE) += mcheck/ |
21 | obj-$(CONFIG_MTRR) += mtrr/ | 22 | obj-$(CONFIG_MTRR) += mtrr/ |
22 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | 23 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ |
23 | 24 | ||
24 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 25 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o |
26 | |||
27 | quiet_cmd_mkcapflags = MKCAP $@ | ||
28 | cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ | ||
29 | |||
30 | cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h | ||
31 | |||
32 | targets += capflags.c | ||
33 | $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE | ||
34 | $(call if_changed,mkcapflags) | ||
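The rule above replaces the hand-maintained feature_names.c with a file generated from cpufeature.h at build time. The generated capflags.c is expected to look roughly like this (an assumption about mkcapflags.pl's output, not taken from this diff):

    /* Sketch of the auto-generated table: one name string per feature bit. */
    const char *const x86_cap_flags[NCAPINTS*32] = {
            [X86_FEATURE_FPU] = "fpu",
            [X86_FEATURE_VME] = "vme",
            /* ... one entry per X86_FEATURE_* define in cpufeature.h ... */
    };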
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index a6ef672adbba..0d9c993aa93e 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,6 +7,8 @@ | |||
7 | #include <asm/pat.h> | 7 | #include <asm/pat.h> |
8 | #include <asm/processor.h> | 8 | #include <asm/processor.h> |
9 | 9 | ||
10 | #include <mach_apic.h> | ||
11 | |||
10 | struct cpuid_bit { | 12 | struct cpuid_bit { |
11 | u16 feature; | 13 | u16 feature; |
12 | u8 reg; | 14 | u8 reg; |
@@ -48,6 +50,92 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
48 | } | 50 | } |
49 | } | 51 | } |
50 | 52 | ||
53 | /* leaf 0xb SMT level */ | ||
54 | #define SMT_LEVEL 0 | ||
55 | |||
56 | /* leaf 0xb sub-leaf types */ | ||
57 | #define INVALID_TYPE 0 | ||
58 | #define SMT_TYPE 1 | ||
59 | #define CORE_TYPE 2 | ||
60 | |||
61 | #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) | ||
62 | #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) | ||
63 | #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) | ||
64 | |||
65 | /* | ||
66 | * Check for extended topology enumeration cpuid leaf 0xb and if it | ||
67 | * exists, use it for populating initial_apicid and cpu topology | ||
68 | * detection. | ||
69 | */ | ||
70 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | ||
71 | { | ||
72 | #ifdef CONFIG_SMP | ||
73 | unsigned int eax, ebx, ecx, edx, sub_index; | ||
74 | unsigned int ht_mask_width, core_plus_mask_width; | ||
75 | unsigned int core_select_mask, core_level_siblings; | ||
76 | |||
77 | if (c->cpuid_level < 0xb) | ||
78 | return; | ||
79 | |||
80 | cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); | ||
81 | |||
82 | /* | ||
83 | * check if the cpuid leaf 0xb is actually implemented. | ||
84 | */ | ||
85 | if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) | ||
86 | return; | ||
87 | |||
88 | set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); | ||
89 | |||
90 | /* | ||
91 | * initial apic id, which also represents 32-bit extended x2apic id. | ||
92 | */ | ||
93 | c->initial_apicid = edx; | ||
94 | |||
95 | /* | ||
96 | * Populate HT related information from sub-leaf level 0. | ||
97 | */ | ||
98 | core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); | ||
99 | core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); | ||
100 | |||
101 | sub_index = 1; | ||
102 | do { | ||
103 | cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); | ||
104 | |||
105 | /* | ||
106 | * Check for the Core type in the implemented sub leaves. | ||
107 | */ | ||
108 | if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { | ||
109 | core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); | ||
110 | core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); | ||
111 | break; | ||
112 | } | ||
113 | |||
114 | sub_index++; | ||
115 | } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); | ||
116 | |||
117 | core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; | ||
118 | |||
119 | #ifdef CONFIG_X86_32 | ||
120 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width) | ||
121 | & core_select_mask; | ||
122 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); | ||
123 | #else | ||
124 | c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask; | ||
125 | c->phys_proc_id = phys_pkg_id(core_plus_mask_width); | ||
126 | #endif | ||
127 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); | ||
128 | |||
129 | |||
130 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
131 | c->phys_proc_id); | ||
132 | if (c->x86_max_cores > 1) | ||
133 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
134 | c->cpu_core_id); | ||
135 | return; | ||
136 | #endif | ||
137 | } | ||
138 | |||
51 | #ifdef CONFIG_X86_PAT | 139 | #ifdef CONFIG_X86_PAT |
52 | void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) | 140 | void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) |
53 | { | 141 | { |
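A worked example of detect_extended_topology()'s mask arithmetic, for a hypothetical package with 2 SMT threads per core and 4 cores (8 logical CPUs), treating phys_pkg_id() as a plain right shift of the APIC id by the given width:

    /* sub-leaf 0 (SMT):  LEVEL_MAX_SIBLINGS = 2, ht_mask_width        = 1 */
    /* sub-leaf 1 (CORE): LEVEL_MAX_SIBLINGS = 8, core_plus_mask_width = 3 */
    /* core_select_mask = (~(-1 << 3)) >> 1 = 0x3                          */
    /* cpu_core_id      = (initial_apicid >> 1) & 0x3                      */
    /* phys_proc_id     =  initial_apicid >> 3                             */
    /* x86_max_cores    = 8 / 2 = 4                                        */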
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..d64ea6097ca7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
31 | if (c->x86_power & (1<<8)) | 31 | if (c->x86_power & (1<<8)) |
32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
33 | } | 33 | } |
34 | |||
35 | /* Set MTRR capability flag if appropriate */ | ||
36 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
37 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
38 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
34 | } | 39 | } |
35 | 40 | ||
36 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 41 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
166 | mbytes); | 171 | mbytes); |
167 | } | 172 | } |
168 | 173 | ||
169 | /* Set MTRR capability flag if appropriate */ | ||
170 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
171 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
172 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
173 | break; | 174 | break; |
174 | } | 175 | } |
175 | 176 | ||
@@ -297,6 +298,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = { | |||
297 | .c_early_init = early_init_amd, | 298 | .c_early_init = early_init_amd, |
298 | .c_init = init_amd, | 299 | .c_init = init_amd, |
299 | .c_size_cache = amd_size_cache, | 300 | .c_size_cache = amd_size_cache, |
301 | .c_x86_vendor = X86_VENDOR_AMD, | ||
300 | }; | 302 | }; |
301 | 303 | ||
302 | cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); | 304 | cpu_dev_register(amd_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
index d1692b2a41ff..d1c721c0c49f 100644
--- a/arch/x86/kernel/cpu/amd_64.c
+++ b/arch/x86/kernel/cpu/amd_64.c
@@ -218,7 +218,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = { | |||
218 | .c_ident = { "AuthenticAMD" }, | 218 | .c_ident = { "AuthenticAMD" }, |
219 | .c_early_init = early_init_amd, | 219 | .c_early_init = early_init_amd, |
220 | .c_init = init_amd, | 220 | .c_init = init_amd, |
221 | .c_x86_vendor = X86_VENDOR_AMD, | ||
221 | }; | 222 | }; |
222 | 223 | ||
223 | cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); | 224 | cpu_dev_register(amd_cpu_dev); |
224 | |||
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e0f45edd6a55..e5f6d89521bf 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -314,6 +314,16 @@ enum { | |||
314 | EAMD3D = 1<<20, | 314 | EAMD3D = 1<<20, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | ||
318 | { | ||
319 | switch (c->x86) { | ||
320 | case 5: | ||
321 | /* Emulate MTRRs using Centaur's MCR. */ | ||
322 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); | ||
323 | break; | ||
324 | } | ||
325 | } | ||
326 | |||
317 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 327 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
318 | { | 328 | { |
319 | 329 | ||
@@ -462,8 +472,10 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) | |||
462 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | 472 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { |
463 | .c_vendor = "Centaur", | 473 | .c_vendor = "Centaur", |
464 | .c_ident = { "CentaurHauls" }, | 474 | .c_ident = { "CentaurHauls" }, |
475 | .c_early_init = early_init_centaur, | ||
465 | .c_init = init_centaur, | 476 | .c_init = init_centaur, |
466 | .c_size_cache = centaur_size_cache, | 477 | .c_size_cache = centaur_size_cache, |
478 | .c_x86_vendor = X86_VENDOR_CENTAUR, | ||
467 | }; | 479 | }; |
468 | 480 | ||
469 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); | 481 | cpu_dev_register(centaur_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index 1d181c40e2e1..49cfc6d2f2fb 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -29,7 +29,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | |||
29 | .c_ident = { "CentaurHauls" }, | 29 | .c_ident = { "CentaurHauls" }, |
30 | .c_early_init = early_init_centaur, | 30 | .c_early_init = early_init_centaur, |
31 | .c_init = init_centaur, | 31 | .c_init = init_centaur, |
32 | .c_x86_vendor = X86_VENDOR_CENTAUR, | ||
32 | }; | 33 | }; |
33 | 34 | ||
34 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); | 35 | cpu_dev_register(centaur_cpu_dev); |
35 | 36 | ||
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/kernel/cpu/cmpxchg.c
new file mode 100644
index 000000000000..2056ccf572cc
--- /dev/null
+++ b/arch/x86/kernel/cpu/cmpxchg.c
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * cmpxchg*() fallbacks for CPU not supporting these instructions | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/module.h> | ||
8 | |||
9 | #ifndef CONFIG_X86_CMPXCHG | ||
10 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
11 | { | ||
12 | u8 prev; | ||
13 | unsigned long flags; | ||
14 | |||
15 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
16 | local_irq_save(flags); | ||
17 | prev = *(u8 *)ptr; | ||
18 | if (prev == old) | ||
19 | *(u8 *)ptr = new; | ||
20 | local_irq_restore(flags); | ||
21 | return prev; | ||
22 | } | ||
23 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
24 | |||
25 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
26 | { | ||
27 | u16 prev; | ||
28 | unsigned long flags; | ||
29 | |||
30 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
31 | local_irq_save(flags); | ||
32 | prev = *(u16 *)ptr; | ||
33 | if (prev == old) | ||
34 | *(u16 *)ptr = new; | ||
35 | local_irq_restore(flags); | ||
36 | return prev; | ||
37 | } | ||
38 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
39 | |||
40 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
41 | { | ||
42 | u32 prev; | ||
43 | unsigned long flags; | ||
44 | |||
45 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
46 | local_irq_save(flags); | ||
47 | prev = *(u32 *)ptr; | ||
48 | if (prev == old) | ||
49 | *(u32 *)ptr = new; | ||
50 | local_irq_restore(flags); | ||
51 | return prev; | ||
52 | } | ||
53 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
54 | #endif | ||
55 | |||
56 | #ifndef CONFIG_X86_CMPXCHG64 | ||
57 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
58 | { | ||
59 | u64 prev; | ||
60 | unsigned long flags; | ||
61 | |||
62 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
63 | local_irq_save(flags); | ||
64 | prev = *(u64 *)ptr; | ||
65 | if (prev == old) | ||
66 | *(u64 *)ptr = new; | ||
67 | local_irq_restore(flags); | ||
68 | return prev; | ||
69 | } | ||
70 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
71 | #endif | ||
72 | |||
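The new cmpxchg.c above supplies software fallbacks for CPUs without the CMPXCHG (386) or CMPXCHG8B (386/486) instructions: the compare-and-exchange is emulated by disabling local interrupts around a plain load/compare/store, which, as the comments note, is only safe on non-SMP configurations. A hedged caller-side sketch, assuming a kernel built without CONFIG_X86_CMPXCHG so that these fallbacks are compiled in; on other configurations the generic cmpxchg() macro would be used instead:

    /* Illustrative only: the fallback returns the previous value, so a
     * successful swap is detected by comparing against the expected value. */
    static int take_token(volatile u32 *token)
    {
            /* claim the token only if it is currently 0 */
            return cmpxchg_386_u32(token, 0, 1) == 0;
    }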
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 80ab20d4fa39..7d5a07f0fd24 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/mtrr.h> | 13 | #include <asm/mtrr.h> |
14 | #include <asm/mce.h> | 14 | #include <asm/mce.h> |
15 | #include <asm/pat.h> | 15 | #include <asm/pat.h> |
16 | #include <asm/asm.h> | ||
16 | #ifdef CONFIG_X86_LOCAL_APIC | 17 | #ifdef CONFIG_X86_LOCAL_APIC |
17 | #include <asm/mpspec.h> | 18 | #include <asm/mpspec.h> |
18 | #include <asm/apic.h> | 19 | #include <asm/apic.h> |
@@ -21,7 +22,9 @@ | |||
21 | 22 | ||
22 | #include "cpu.h" | 23 | #include "cpu.h" |
23 | 24 | ||
24 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | 25 | static struct cpu_dev *this_cpu __cpuinitdata; |
26 | |||
27 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | ||
25 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 28 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, |
26 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 29 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, |
27 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 30 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, |
@@ -57,12 +60,124 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | |||
57 | } }; | 60 | } }; |
58 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | 61 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
59 | 62 | ||
60 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
61 | |||
62 | static int cachesize_override __cpuinitdata = -1; | 63 | static int cachesize_override __cpuinitdata = -1; |
63 | static int disable_x86_serial_nr __cpuinitdata = 1; | 64 | static int disable_x86_serial_nr __cpuinitdata = 1; |
64 | 65 | ||
65 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 66 | static int __init cachesize_setup(char *str) |
67 | { | ||
68 | get_option(&str, &cachesize_override); | ||
69 | return 1; | ||
70 | } | ||
71 | __setup("cachesize=", cachesize_setup); | ||
72 | |||
73 | /* | ||
74 | * Naming convention should be: <Name> [(<Codename>)] | ||
75 | * This table only is used unless init_<vendor>() below doesn't set it; | ||
76 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
77 | * | ||
78 | */ | ||
79 | |||
80 | /* Look up CPU names by table lookup. */ | ||
81 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | ||
82 | { | ||
83 | struct cpu_model_info *info; | ||
84 | |||
85 | if (c->x86_model >= 16) | ||
86 | return NULL; /* Range check */ | ||
87 | |||
88 | if (!this_cpu) | ||
89 | return NULL; | ||
90 | |||
91 | info = this_cpu->c_models; | ||
92 | |||
93 | while (info && info->family) { | ||
94 | if (info->family == c->x86) | ||
95 | return info->model_names[c->x86_model]; | ||
96 | info++; | ||
97 | } | ||
98 | return NULL; /* Not found */ | ||
99 | } | ||
100 | |||
101 | static int __init x86_fxsr_setup(char *s) | ||
102 | { | ||
103 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | ||
104 | setup_clear_cpu_cap(X86_FEATURE_XMM); | ||
105 | return 1; | ||
106 | } | ||
107 | __setup("nofxsr", x86_fxsr_setup); | ||
108 | |||
109 | static int __init x86_sep_setup(char *s) | ||
110 | { | ||
111 | setup_clear_cpu_cap(X86_FEATURE_SEP); | ||
112 | return 1; | ||
113 | } | ||
114 | __setup("nosep", x86_sep_setup); | ||
115 | |||
116 | /* Standard macro to see if a specific flag is changeable */ | ||
117 | static inline int flag_is_changeable_p(u32 flag) | ||
118 | { | ||
119 | u32 f1, f2; | ||
120 | |||
121 | asm("pushfl\n\t" | ||
122 | "pushfl\n\t" | ||
123 | "popl %0\n\t" | ||
124 | "movl %0,%1\n\t" | ||
125 | "xorl %2,%0\n\t" | ||
126 | "pushl %0\n\t" | ||
127 | "popfl\n\t" | ||
128 | "pushfl\n\t" | ||
129 | "popl %0\n\t" | ||
130 | "popfl\n\t" | ||
131 | : "=&r" (f1), "=&r" (f2) | ||
132 | : "ir" (flag)); | ||
133 | |||
134 | return ((f1^f2) & flag) != 0; | ||
135 | } | ||
136 | |||
137 | /* Probe for the CPUID instruction */ | ||
138 | static int __cpuinit have_cpuid_p(void) | ||
139 | { | ||
140 | return flag_is_changeable_p(X86_EFLAGS_ID); | ||
141 | } | ||
142 | |||
143 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | ||
144 | { | ||
145 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { | ||
146 | /* Disable processor serial number */ | ||
147 | unsigned long lo, hi; | ||
148 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
149 | lo |= 0x200000; | ||
150 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
151 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | ||
152 | clear_cpu_cap(c, X86_FEATURE_PN); | ||
153 | |||
154 | /* Disabling the serial number may affect the cpuid level */ | ||
155 | c->cpuid_level = cpuid_eax(0); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | static int __init x86_serial_nr_setup(char *s) | ||
160 | { | ||
161 | disable_x86_serial_nr = 0; | ||
162 | return 1; | ||
163 | } | ||
164 | __setup("serialnumber", x86_serial_nr_setup); | ||
165 | |||
166 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
167 | |||
168 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
169 | * it's on the real one. */ | ||
170 | void switch_to_new_gdt(void) | ||
171 | { | ||
172 | struct desc_ptr gdt_descr; | ||
173 | |||
174 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
175 | gdt_descr.size = GDT_SIZE - 1; | ||
176 | load_gdt(&gdt_descr); | ||
177 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | ||
178 | } | ||
179 | |||
180 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | ||
66 | 181 | ||
67 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 182 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
68 | { | 183 | { |
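Most of the hunk above is code motion: the boot-parameter handlers (cachesize=, nofxsr, nosep, serialnumber), the EFLAGS.ID probe and switch_to_new_gdt() move up in common.c so that they sit ahead of the detection code that uses them. The handlers all follow the same __setup() pattern, where the callback receives the text after the key and typically parses it with get_option(). A hedged sketch of that pattern for a hypothetical parameter (the name example_param= is illustrative):

    #include <linux/init.h>
    #include <linux/kernel.h>       /* get_option() */

    static int example_param __initdata;

    static int __init example_param_setup(char *str)
    {
            /* parse one integer after "example_param=" */
            get_option(&str, &example_param);
            return 1;               /* parameter consumed */
    }
    __setup("example_param=", example_param_setup);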
@@ -80,22 +195,15 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c) | |||
80 | static struct cpu_dev __cpuinitdata default_cpu = { | 195 | static struct cpu_dev __cpuinitdata default_cpu = { |
81 | .c_init = default_init, | 196 | .c_init = default_init, |
82 | .c_vendor = "Unknown", | 197 | .c_vendor = "Unknown", |
198 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | ||
83 | }; | 199 | }; |
84 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
85 | |||
86 | static int __init cachesize_setup(char *str) | ||
87 | { | ||
88 | get_option(&str, &cachesize_override); | ||
89 | return 1; | ||
90 | } | ||
91 | __setup("cachesize=", cachesize_setup); | ||
92 | 200 | ||
93 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | 201 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) |
94 | { | 202 | { |
95 | unsigned int *v; | 203 | unsigned int *v; |
96 | char *p, *q; | 204 | char *p, *q; |
97 | 205 | ||
98 | if (cpuid_eax(0x80000000) < 0x80000004) | 206 | if (c->extended_cpuid_level < 0x80000004) |
99 | return 0; | 207 | return 0; |
100 | 208 | ||
101 | v = (unsigned int *) c->x86_model_id; | 209 | v = (unsigned int *) c->x86_model_id; |
@@ -119,24 +227,23 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
119 | return 1; | 227 | return 1; |
120 | } | 228 | } |
121 | 229 | ||
122 | |||
123 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | 230 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) |
124 | { | 231 | { |
125 | unsigned int n, dummy, ecx, edx, l2size; | 232 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
126 | 233 | ||
127 | n = cpuid_eax(0x80000000); | 234 | n = c->extended_cpuid_level; |
128 | 235 | ||
129 | if (n >= 0x80000005) { | 236 | if (n >= 0x80000005) { |
130 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); | 237 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
131 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", | 238 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", |
132 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | 239 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); |
133 | c->x86_cache_size = (ecx>>24)+(edx>>24); | 240 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
134 | } | 241 | } |
135 | 242 | ||
136 | if (n < 0x80000006) /* Some chips just has a large L1. */ | 243 | if (n < 0x80000006) /* Some chips just has a large L1. */ |
137 | return; | 244 | return; |
138 | 245 | ||
139 | ecx = cpuid_ecx(0x80000006); | 246 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
140 | l2size = ecx >> 16; | 247 | l2size = ecx >> 16; |
141 | 248 | ||
142 | /* do processor-specific cache resizing */ | 249 | /* do processor-specific cache resizing */ |
@@ -153,112 +260,90 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
153 | c->x86_cache_size = l2size; | 260 | c->x86_cache_size = l2size; |
154 | 261 | ||
155 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | 262 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", |
156 | l2size, ecx & 0xFF); | 263 | l2size, ecx & 0xFF); |
157 | } | 264 | } |
158 | 265 | ||
159 | /* | 266 | #ifdef CONFIG_X86_HT |
160 | * Naming convention should be: <Name> [(<Codename>)] | 267 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
161 | * This table only is used unless init_<vendor>() below doesn't set it; | ||
162 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
163 | * | ||
164 | */ | ||
165 | |||
166 | /* Look up CPU names by table lookup. */ | ||
167 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | ||
168 | { | 268 | { |
169 | struct cpu_model_info *info; | 269 | u32 eax, ebx, ecx, edx; |
270 | int index_msb, core_bits; | ||
170 | 271 | ||
171 | if (c->x86_model >= 16) | 272 | if (!cpu_has(c, X86_FEATURE_HT)) |
172 | return NULL; /* Range check */ | 273 | return; |
173 | 274 | ||
174 | if (!this_cpu) | 275 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
175 | return NULL; | 276 | goto out; |
176 | 277 | ||
177 | info = this_cpu->c_models; | 278 | cpuid(1, &eax, &ebx, &ecx, &edx); |
178 | 279 | ||
179 | while (info && info->family) { | 280 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
180 | if (info->family == c->x86) | 281 | |
181 | return info->model_names[c->x86_model]; | 282 | if (smp_num_siblings == 1) { |
182 | info++; | 283 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); |
284 | } else if (smp_num_siblings > 1) { | ||
285 | |||
286 | if (smp_num_siblings > NR_CPUS) { | ||
287 | printk(KERN_WARNING "CPU: Unsupported number of siblings %d", | ||
288 | smp_num_siblings); | ||
289 | smp_num_siblings = 1; | ||
290 | return; | ||
291 | } | ||
292 | |||
293 | index_msb = get_count_order(smp_num_siblings); | ||
294 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | ||
295 | |||
296 | |||
297 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
298 | |||
299 | index_msb = get_count_order(smp_num_siblings); | ||
300 | |||
301 | core_bits = get_count_order(c->x86_max_cores); | ||
302 | |||
303 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | ||
304 | ((1 << core_bits) - 1); | ||
183 | } | 305 | } |
184 | return NULL; /* Not found */ | ||
185 | } | ||
186 | 306 | ||
307 | out: | ||
308 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | ||
309 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
310 | c->phys_proc_id); | ||
311 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
312 | c->cpu_core_id); | ||
313 | } | ||
314 | } | ||
315 | #endif | ||
187 | 316 | ||
188 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | 317 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
189 | { | 318 | { |
190 | char *v = c->x86_vendor_id; | 319 | char *v = c->x86_vendor_id; |
191 | int i; | 320 | int i; |
192 | static int printed; | 321 | static int printed; |
193 | 322 | ||
194 | for (i = 0; i < X86_VENDOR_NUM; i++) { | 323 | for (i = 0; i < X86_VENDOR_NUM; i++) { |
195 | if (cpu_devs[i]) { | 324 | if (!cpu_devs[i]) |
196 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | 325 | break; |
197 | (cpu_devs[i]->c_ident[1] && | 326 | |
198 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | 327 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
199 | c->x86_vendor = i; | 328 | (cpu_devs[i]->c_ident[1] && |
200 | if (!early) | 329 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
201 | this_cpu = cpu_devs[i]; | 330 | this_cpu = cpu_devs[i]; |
202 | return; | 331 | c->x86_vendor = this_cpu->c_x86_vendor; |
203 | } | 332 | return; |
204 | } | 333 | } |
205 | } | 334 | } |
335 | |||
206 | if (!printed) { | 336 | if (!printed) { |
207 | printed++; | 337 | printed++; |
208 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | 338 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); |
209 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | 339 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); |
210 | } | 340 | } |
341 | |||
211 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 342 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
212 | this_cpu = &default_cpu; | 343 | this_cpu = &default_cpu; |
213 | } | 344 | } |
214 | 345 | ||
215 | 346 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |
216 | static int __init x86_fxsr_setup(char *s) | ||
217 | { | ||
218 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | ||
219 | setup_clear_cpu_cap(X86_FEATURE_XMM); | ||
220 | return 1; | ||
221 | } | ||
222 | __setup("nofxsr", x86_fxsr_setup); | ||
223 | |||
224 | |||
225 | static int __init x86_sep_setup(char *s) | ||
226 | { | ||
227 | setup_clear_cpu_cap(X86_FEATURE_SEP); | ||
228 | return 1; | ||
229 | } | ||
230 | __setup("nosep", x86_sep_setup); | ||
231 | |||
232 | |||
233 | /* Standard macro to see if a specific flag is changeable */ | ||
234 | static inline int flag_is_changeable_p(u32 flag) | ||
235 | { | ||
236 | u32 f1, f2; | ||
237 | |||
238 | asm("pushfl\n\t" | ||
239 | "pushfl\n\t" | ||
240 | "popl %0\n\t" | ||
241 | "movl %0,%1\n\t" | ||
242 | "xorl %2,%0\n\t" | ||
243 | "pushl %0\n\t" | ||
244 | "popfl\n\t" | ||
245 | "pushfl\n\t" | ||
246 | "popl %0\n\t" | ||
247 | "popfl\n\t" | ||
248 | : "=&r" (f1), "=&r" (f2) | ||
249 | : "ir" (flag)); | ||
250 | |||
251 | return ((f1^f2) & flag) != 0; | ||
252 | } | ||
253 | |||
254 | |||
255 | /* Probe for the CPUID instruction */ | ||
256 | static int __cpuinit have_cpuid_p(void) | ||
257 | { | ||
258 | return flag_is_changeable_p(X86_EFLAGS_ID); | ||
259 | } | ||
260 | |||
261 | void __init cpu_detect(struct cpuinfo_x86 *c) | ||
262 | { | 347 | { |
263 | /* Get vendor name */ | 348 | /* Get vendor name */ |
264 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 349 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
@@ -267,50 +352,47 @@ void __init cpu_detect(struct cpuinfo_x86 *c) | |||
267 | (unsigned int *)&c->x86_vendor_id[4]); | 352 | (unsigned int *)&c->x86_vendor_id[4]); |
268 | 353 | ||
269 | c->x86 = 4; | 354 | c->x86 = 4; |
355 | /* Intel-defined flags: level 0x00000001 */ | ||
270 | if (c->cpuid_level >= 0x00000001) { | 356 | if (c->cpuid_level >= 0x00000001) { |
271 | u32 junk, tfms, cap0, misc; | 357 | u32 junk, tfms, cap0, misc; |
272 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); | 358 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
273 | c->x86 = (tfms >> 8) & 15; | 359 | c->x86 = (tfms >> 8) & 0xf; |
274 | c->x86_model = (tfms >> 4) & 15; | 360 | c->x86_model = (tfms >> 4) & 0xf; |
361 | c->x86_mask = tfms & 0xf; | ||
275 | if (c->x86 == 0xf) | 362 | if (c->x86 == 0xf) |
276 | c->x86 += (tfms >> 20) & 0xff; | 363 | c->x86 += (tfms >> 20) & 0xff; |
277 | if (c->x86 >= 0x6) | 364 | if (c->x86 >= 0x6) |
278 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 365 | c->x86_model += ((tfms >> 16) & 0xf) << 4; |
279 | c->x86_mask = tfms & 15; | ||
280 | if (cap0 & (1<<19)) { | 366 | if (cap0 & (1<<19)) { |
281 | c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; | ||
282 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | 367 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
368 | c->x86_cache_alignment = c->x86_clflush_size; | ||
283 | } | 369 | } |
284 | } | 370 | } |
285 | } | 371 | } |
286 | static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | 372 | |
373 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | ||
287 | { | 374 | { |
288 | u32 tfms, xlvl; | 375 | u32 tfms, xlvl; |
289 | unsigned int ebx; | 376 | u32 ebx; |
290 | 377 | ||
291 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 378 | /* Intel-defined flags: level 0x00000001 */ |
292 | if (have_cpuid_p()) { | 379 | if (c->cpuid_level >= 0x00000001) { |
293 | /* Intel-defined flags: level 0x00000001 */ | 380 | u32 capability, excap; |
294 | if (c->cpuid_level >= 0x00000001) { | 381 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
295 | u32 capability, excap; | 382 | c->x86_capability[0] = capability; |
296 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 383 | c->x86_capability[4] = excap; |
297 | c->x86_capability[0] = capability; | 384 | } |
298 | c->x86_capability[4] = excap; | ||
299 | } | ||
300 | 385 | ||
301 | /* AMD-defined flags: level 0x80000001 */ | 386 | /* AMD-defined flags: level 0x80000001 */ |
302 | xlvl = cpuid_eax(0x80000000); | 387 | xlvl = cpuid_eax(0x80000000); |
303 | if ((xlvl & 0xffff0000) == 0x80000000) { | 388 | c->extended_cpuid_level = xlvl; |
304 | if (xlvl >= 0x80000001) { | 389 | if ((xlvl & 0xffff0000) == 0x80000000) { |
305 | c->x86_capability[1] = cpuid_edx(0x80000001); | 390 | if (xlvl >= 0x80000001) { |
306 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 391 | c->x86_capability[1] = cpuid_edx(0x80000001); |
307 | } | 392 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
308 | } | 393 | } |
309 | |||
310 | } | 394 | } |
311 | |||
312 | } | 395 | } |
313 | |||
314 | /* | 396 | /* |
315 | * Do minimum CPU detection early. | 397 | * Do minimum CPU detection early. |
316 | * Fields really needed: vendor, cpuid_level, family, model, mask, | 398 | * Fields really needed: vendor, cpuid_level, family, model, mask, |
@@ -320,109 +402,114 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | |||
320 | * WARNING: this function is only called on the BP. Don't add code here | 402 | * WARNING: this function is only called on the BP. Don't add code here |
321 | * that is supposed to run on all CPUs. | 403 | * that is supposed to run on all CPUs. |
322 | */ | 404 | */ |
323 | static void __init early_cpu_detect(void) | 405 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
324 | { | 406 | { |
325 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
326 | |||
327 | c->x86_cache_alignment = 32; | ||
328 | c->x86_clflush_size = 32; | 407 | c->x86_clflush_size = 32; |
408 | c->x86_cache_alignment = c->x86_clflush_size; | ||
329 | 409 | ||
330 | if (!have_cpuid_p()) | 410 | if (!have_cpuid_p()) |
331 | return; | 411 | return; |
332 | 412 | ||
413 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
414 | |||
415 | c->extended_cpuid_level = 0; | ||
416 | |||
333 | cpu_detect(c); | 417 | cpu_detect(c); |
334 | 418 | ||
335 | get_cpu_vendor(c, 1); | 419 | get_cpu_vendor(c); |
420 | |||
421 | get_cpu_cap(c); | ||
336 | 422 | ||
337 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 423 | if (this_cpu->c_early_init) |
338 | cpu_devs[c->x86_vendor]->c_early_init) | 424 | this_cpu->c_early_init(c); |
339 | cpu_devs[c->x86_vendor]->c_early_init(c); | ||
340 | 425 | ||
341 | early_get_cap(c); | 426 | validate_pat_support(c); |
342 | } | 427 | } |
343 | 428 | ||
344 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 429 | void __init early_cpu_init(void) |
345 | { | 430 | { |
346 | u32 tfms, xlvl; | 431 | struct cpu_dev **cdev; |
347 | unsigned int ebx; | 432 | int count = 0; |
348 | 433 | ||
349 | if (have_cpuid_p()) { | 434 | printk("KERNEL supported cpus:\n"); |
350 | /* Get vendor name */ | 435 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
351 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 436 | struct cpu_dev *cpudev = *cdev; |
352 | (unsigned int *)&c->x86_vendor_id[0], | 437 | unsigned int j; |
353 | (unsigned int *)&c->x86_vendor_id[8], | 438 | |
354 | (unsigned int *)&c->x86_vendor_id[4]); | 439 | if (count >= X86_VENDOR_NUM) |
355 | 440 | break; | |
356 | get_cpu_vendor(c, 0); | 441 | cpu_devs[count] = cpudev; |
357 | /* Initialize the standard set of capabilities */ | 442 | count++; |
358 | /* Note that the vendor-specific code below might override */ | 443 | |
359 | /* Intel-defined flags: level 0x00000001 */ | 444 | for (j = 0; j < 2; j++) { |
360 | if (c->cpuid_level >= 0x00000001) { | 445 | if (!cpudev->c_ident[j]) |
361 | u32 capability, excap; | 446 | continue; |
362 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 447 | printk(" %s %s\n", cpudev->c_vendor, |
363 | c->x86_capability[0] = capability; | 448 | cpudev->c_ident[j]); |
364 | c->x86_capability[4] = excap; | ||
365 | c->x86 = (tfms >> 8) & 15; | ||
366 | c->x86_model = (tfms >> 4) & 15; | ||
367 | if (c->x86 == 0xf) | ||
368 | c->x86 += (tfms >> 20) & 0xff; | ||
369 | if (c->x86 >= 0x6) | ||
370 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
371 | c->x86_mask = tfms & 15; | ||
372 | c->initial_apicid = (ebx >> 24) & 0xFF; | ||
373 | #ifdef CONFIG_X86_HT | ||
374 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | ||
375 | c->phys_proc_id = c->initial_apicid; | ||
376 | #else | ||
377 | c->apicid = c->initial_apicid; | ||
378 | #endif | ||
379 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | ||
380 | c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; | ||
381 | } else { | ||
382 | /* Have CPUID level 0 only - unheard of */ | ||
383 | c->x86 = 4; | ||
384 | } | ||
385 | |||
386 | /* AMD-defined flags: level 0x80000001 */ | ||
387 | xlvl = cpuid_eax(0x80000000); | ||
388 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
389 | if (xlvl >= 0x80000001) { | ||
390 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
391 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
392 | } | ||
393 | if (xlvl >= 0x80000004) | ||
394 | get_model_name(c); /* Default name */ | ||
395 | } | 449 | } |
396 | |||
397 | init_scattered_cpuid_features(c); | ||
398 | } | 450 | } |
399 | 451 | ||
452 | early_identify_cpu(&boot_cpu_data); | ||
400 | } | 453 | } |
401 | 454 | ||
402 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 455 | /* |
456 | * The NOPL instruction is supposed to exist on all CPUs with | ||
457 | * family >= 6, unfortunately, that's not true in practice because | ||
458 | * of early VIA chips and (more importantly) broken virtualizers that | ||
459 | * are not easy to detect. Hence, probe for it based on first | ||
460 | * principles. | ||
461 | */ | ||
462 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
403 | { | 463 | { |
404 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { | 464 | const u32 nopl_signature = 0x888c53b1; /* Random number */ |
405 | /* Disable processor serial number */ | 465 | u32 has_nopl = nopl_signature; |
406 | unsigned long lo, hi; | 466 | |
407 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | 467 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
408 | lo |= 0x200000; | 468 | if (c->x86 >= 6) { |
409 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | 469 | asm volatile("\n" |
410 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | 470 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ |
411 | clear_cpu_cap(c, X86_FEATURE_PN); | 471 | "2:\n" |
412 | 472 | " .section .fixup,\"ax\"\n" | |
413 | /* Disabling the serial number may affect the cpuid level */ | 473 | "3: xor %0,%0\n" |
414 | c->cpuid_level = cpuid_eax(0); | 474 | " jmp 2b\n" |
475 | " .previous\n" | ||
476 | _ASM_EXTABLE(1b,3b) | ||
477 | : "+a" (has_nopl)); | ||
478 | |||
479 | if (has_nopl == nopl_signature) | ||
480 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
415 | } | 481 | } |
416 | } | 482 | } |
417 | 483 | ||
418 | static int __init x86_serial_nr_setup(char *s) | 484 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
419 | { | 485 | { |
420 | disable_x86_serial_nr = 0; | 486 | if (!have_cpuid_p()) |
421 | return 1; | 487 | return; |
422 | } | ||
423 | __setup("serialnumber", x86_serial_nr_setup); | ||
424 | 488 | ||
489 | c->extended_cpuid_level = 0; | ||
425 | 490 | ||
491 | cpu_detect(c); | ||
492 | |||
493 | get_cpu_vendor(c); | ||
494 | |||
495 | get_cpu_cap(c); | ||
496 | |||
497 | if (c->cpuid_level >= 0x00000001) { | ||
498 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; | ||
499 | #ifdef CONFIG_X86_HT | ||
500 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | ||
501 | c->phys_proc_id = c->initial_apicid; | ||
502 | #else | ||
503 | c->apicid = c->initial_apicid; | ||
504 | #endif | ||
505 | } | ||
506 | |||
507 | if (c->extended_cpuid_level >= 0x80000004) | ||
508 | get_model_name(c); /* Default name */ | ||
509 | |||
510 | init_scattered_cpuid_features(c); | ||
511 | detect_nopl(c); | ||
512 | } | ||
426 | 513 | ||
427 | /* | 514 | /* |
428 | * This does the hard work of actually picking apart the CPU stuff... | 515 | * This does the hard work of actually picking apart the CPU stuff... |
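The reworked cpu_detect()/generic_identify() path above decodes the CPUID leaf-1 signature (EAX) into family, model and stepping, adding the extended family only when the base family is 0xf and the extended model only when the family is at least 6. A small self-contained worked example of that arithmetic; the sample value 0x000106a5 is purely illustrative of the bit layout (stepping [3:0], model [7:4], family [11:8], extended model [19:16], extended family [27:20]):

    #include <stdio.h>

    int main(void)
    {
            unsigned int tfms = 0x000106a5;          /* illustrative signature */
            unsigned int family   = (tfms >> 8) & 0xf;
            unsigned int model    = (tfms >> 4) & 0xf;
            unsigned int stepping = tfms & 0xf;

            if (family == 0xf)
                    family += (tfms >> 20) & 0xff;      /* extended family */
            if (family >= 0x6)
                    model += ((tfms >> 16) & 0xf) << 4; /* extended model  */

            /* prints: family 0x6, model 0x1a, stepping 0x5 */
            printf("family 0x%x, model 0x%x, stepping 0x%x\n",
                   family, model, stepping);
            return 0;
    }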
@@ -499,7 +586,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
499 | */ | 586 | */ |
500 | if (c != &boot_cpu_data) { | 587 | if (c != &boot_cpu_data) { |
501 | /* AND the already accumulated flags with these */ | 588 | /* AND the already accumulated flags with these */ |
502 | for (i = 0 ; i < NCAPINTS ; i++) | 589 | for (i = 0; i < NCAPINTS; i++) |
503 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 590 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
504 | } | 591 | } |
505 | 592 | ||
@@ -528,51 +615,48 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | |||
528 | mtrr_ap_init(); | 615 | mtrr_ap_init(); |
529 | } | 616 | } |
530 | 617 | ||
531 | #ifdef CONFIG_X86_HT | 618 | struct msr_range { |
532 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 619 | unsigned min; |
533 | { | 620 | unsigned max; |
534 | u32 eax, ebx, ecx, edx; | 621 | }; |
535 | int index_msb, core_bits; | ||
536 | |||
537 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
538 | |||
539 | if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) | ||
540 | return; | ||
541 | |||
542 | smp_num_siblings = (ebx & 0xff0000) >> 16; | ||
543 | 622 | ||
544 | if (smp_num_siblings == 1) { | 623 | static struct msr_range msr_range_array[] __cpuinitdata = { |
545 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | 624 | { 0x00000000, 0x00000418}, |
546 | } else if (smp_num_siblings > 1) { | 625 | { 0xc0000000, 0xc000040b}, |
626 | { 0xc0010000, 0xc0010142}, | ||
627 | { 0xc0011000, 0xc001103b}, | ||
628 | }; | ||
547 | 629 | ||
548 | if (smp_num_siblings > NR_CPUS) { | 630 | static void __cpuinit print_cpu_msr(void) |
549 | printk(KERN_WARNING "CPU: Unsupported number of the " | 631 | { |
550 | "siblings %d", smp_num_siblings); | 632 | unsigned index; |
551 | smp_num_siblings = 1; | 633 | u64 val; |
552 | return; | 634 | int i; |
635 | unsigned index_min, index_max; | ||
636 | |||
637 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | ||
638 | index_min = msr_range_array[i].min; | ||
639 | index_max = msr_range_array[i].max; | ||
640 | for (index = index_min; index < index_max; index++) { | ||
641 | if (rdmsrl_amd_safe(index, &val)) | ||
642 | continue; | ||
643 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | ||
553 | } | 644 | } |
645 | } | ||
646 | } | ||
554 | 647 | ||
555 | index_msb = get_count_order(smp_num_siblings); | 648 | static int show_msr __cpuinitdata; |
556 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | 649 | static __init int setup_show_msr(char *arg) |
557 | 650 | { | |
558 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 651 | int num; |
559 | c->phys_proc_id); | ||
560 | |||
561 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
562 | |||
563 | index_msb = get_count_order(smp_num_siblings) ; | ||
564 | |||
565 | core_bits = get_count_order(c->x86_max_cores); | ||
566 | 652 | ||
567 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | 653 | get_option(&arg, &num); |
568 | ((1 << core_bits) - 1); | ||
569 | 654 | ||
570 | if (c->x86_max_cores > 1) | 655 | if (num > 0) |
571 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 656 | show_msr = num; |
572 | c->cpu_core_id); | 657 | return 1; |
573 | } | ||
574 | } | 658 | } |
575 | #endif | 659 | __setup("show_msr=", setup_show_msr); |
576 | 660 | ||
577 | static __init int setup_noclflush(char *arg) | 661 | static __init int setup_noclflush(char *arg) |
578 | { | 662 | { |
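The hunk above drops the old inline detect_ht() body (now moved earlier in the file) and adds the show_msr= machinery: print_cpu_msr() walks a few fixed MSR index ranges and silently skips any index whose read faults, which rdmsrl_amd_safe() reports with a non-zero return. A hedged user-space analogue of reading one such MSR through the msr driver; it assumes root and that CONFIG_X86_MSR (or the msr module) provides /dev/cpu/N/msr, and MSR 0x10 is IA32_TIME_STAMP_COUNTER, inside the first range in msr_range_array:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* pread() at offset <index> returns the 64-bit MSR value, or
             * fails for unreadable indices; the same "skip on fault"
             * behaviour the kernel gets from rdmsrl_amd_safe(). */
            if (pread(fd, &val, sizeof(val), 0x10) == sizeof(val))
                    printf("MSR%08x: %016llx\n", 0x10,
                           (unsigned long long)val);
            close(fd);
            return 0;
    }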
@@ -591,17 +675,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
591 | vendor = c->x86_vendor_id; | 675 | vendor = c->x86_vendor_id; |
592 | 676 | ||
593 | if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) | 677 | if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) |
594 | printk("%s ", vendor); | 678 | printk(KERN_CONT "%s ", vendor); |
595 | 679 | ||
596 | if (!c->x86_model_id[0]) | 680 | if (c->x86_model_id[0]) |
597 | printk("%d86", c->x86); | 681 | printk(KERN_CONT "%s", c->x86_model_id); |
598 | else | 682 | else |
599 | printk("%s", c->x86_model_id); | 683 | printk(KERN_CONT "%d86", c->x86); |
600 | 684 | ||
601 | if (c->x86_mask || c->cpuid_level >= 0) | 685 | if (c->x86_mask || c->cpuid_level >= 0) |
602 | printk(" stepping %02x\n", c->x86_mask); | 686 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
603 | else | 687 | else |
604 | printk("\n"); | 688 | printk(KERN_CONT "\n"); |
689 | |||
690 | #ifdef CONFIG_SMP | ||
691 | if (c->cpu_index < show_msr) | ||
692 | print_cpu_msr(); | ||
693 | #else | ||
694 | if (show_msr) | ||
695 | print_cpu_msr(); | ||
696 | #endif | ||
605 | } | 697 | } |
606 | 698 | ||
607 | static __init int setup_disablecpuid(char *arg) | 699 | static __init int setup_disablecpuid(char *arg) |
@@ -617,19 +709,6 @@ __setup("clearcpuid=", setup_disablecpuid); | |||
617 | 709 | ||
618 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | 710 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; |
619 | 711 | ||
620 | void __init early_cpu_init(void) | ||
621 | { | ||
622 | struct cpu_vendor_dev *cvdev; | ||
623 | |||
624 | for (cvdev = __x86cpuvendor_start ; | ||
625 | cvdev < __x86cpuvendor_end ; | ||
626 | cvdev++) | ||
627 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
628 | |||
629 | early_cpu_detect(); | ||
630 | validate_pat_support(&boot_cpu_data); | ||
631 | } | ||
632 | |||
633 | /* Make sure %fs is initialized properly in idle threads */ | 712 | /* Make sure %fs is initialized properly in idle threads */ |
634 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | 713 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) |
635 | { | 714 | { |
@@ -638,18 +717,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | |||
638 | return regs; | 717 | return regs; |
639 | } | 718 | } |
640 | 719 | ||
641 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
642 | * it's on the real one. */ | ||
643 | void switch_to_new_gdt(void) | ||
644 | { | ||
645 | struct desc_ptr gdt_descr; | ||
646 | |||
647 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
648 | gdt_descr.size = GDT_SIZE - 1; | ||
649 | load_gdt(&gdt_descr); | ||
650 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | ||
651 | } | ||
652 | |||
653 | /* | 720 | /* |
654 | * cpu_init() initializes state that is per-CPU. Some data is already | 721 | * cpu_init() initializes state that is per-CPU. Some data is already |
655 | * initialized (naturally) in the bootstrap process, such as the GDT | 722 | * initialized (naturally) in the bootstrap process, such as the GDT |
@@ -709,9 +776,20 @@ void __cpuinit cpu_init(void) | |||
709 | /* | 776 | /* |
710 | * Force FPU initialization: | 777 | * Force FPU initialization: |
711 | */ | 778 | */ |
712 | current_thread_info()->status = 0; | 779 | if (cpu_has_xsave) |
780 | current_thread_info()->status = TS_XSAVE; | ||
781 | else | ||
782 | current_thread_info()->status = 0; | ||
713 | clear_used_math(); | 783 | clear_used_math(); |
714 | mxcsr_feature_mask_init(); | 784 | mxcsr_feature_mask_init(); |
785 | |||
786 | /* | ||
787 | * Boot processor to setup the FP and extended state context info. | ||
788 | */ | ||
789 | if (!smp_processor_id()) | ||
790 | init_thread_xstate(); | ||
791 | |||
792 | xsave_init(); | ||
715 | } | 793 | } |
716 | 794 | ||
717 | #ifdef CONFIG_HOTPLUG_CPU | 795 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c index dd6e3f15017e..bcb48ce05d23 100644 --- a/arch/x86/kernel/cpu/common_64.c +++ b/arch/x86/kernel/cpu/common_64.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/mtrr.h> | 18 | #include <asm/mtrr.h> |
19 | #include <asm/mce.h> | 19 | #include <asm/mce.h> |
20 | #include <asm/pat.h> | 20 | #include <asm/pat.h> |
21 | #include <asm/asm.h> | ||
21 | #include <asm/numa.h> | 22 | #include <asm/numa.h> |
22 | #ifdef CONFIG_X86_LOCAL_APIC | 23 | #ifdef CONFIG_X86_LOCAL_APIC |
23 | #include <asm/mpspec.h> | 24 | #include <asm/mpspec.h> |
@@ -36,6 +37,8 @@ | |||
36 | 37 | ||
37 | #include "cpu.h" | 38 | #include "cpu.h" |
38 | 39 | ||
40 | static struct cpu_dev *this_cpu __cpuinitdata; | ||
41 | |||
39 | /* We need valid kernel segments for data and code in long mode too | 42 | /* We need valid kernel segments for data and code in long mode too |
40 | * IRET will check the segment types kkeil 2000/10/28 | 43 | * IRET will check the segment types kkeil 2000/10/28 |
41 | * Also sysret mandates a special GDT layout | 44 | * Also sysret mandates a special GDT layout |
@@ -65,7 +68,7 @@ void switch_to_new_gdt(void) | |||
65 | load_gdt(&gdt_descr); | 68 | load_gdt(&gdt_descr); |
66 | } | 69 | } |
67 | 70 | ||
68 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 71 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; |
69 | 72 | ||
70 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 73 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
71 | { | 74 | { |
@@ -75,12 +78,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c) | |||
75 | static struct cpu_dev __cpuinitdata default_cpu = { | 78 | static struct cpu_dev __cpuinitdata default_cpu = { |
76 | .c_init = default_init, | 79 | .c_init = default_init, |
77 | .c_vendor = "Unknown", | 80 | .c_vendor = "Unknown", |
81 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | ||
78 | }; | 82 | }; |
79 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
80 | 83 | ||
81 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | 84 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) |
82 | { | 85 | { |
83 | unsigned int *v; | 86 | unsigned int *v; |
87 | char *p, *q; | ||
84 | 88 | ||
85 | if (c->extended_cpuid_level < 0x80000004) | 89 | if (c->extended_cpuid_level < 0x80000004) |
86 | return 0; | 90 | return 0; |
@@ -90,35 +94,49 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
90 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | 94 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); |
91 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | 95 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); |
92 | c->x86_model_id[48] = 0; | 96 | c->x86_model_id[48] = 0; |
97 | |||
98 | /* Intel chips right-justify this string for some dumb reason; | ||
99 | undo that brain damage */ | ||
100 | p = q = &c->x86_model_id[0]; | ||
101 | while (*p == ' ') | ||
102 | p++; | ||
103 | if (p != q) { | ||
104 | while (*p) | ||
105 | *q++ = *p++; | ||
106 | while (q <= &c->x86_model_id[48]) | ||
107 | *q++ = '\0'; /* Zero-pad the rest */ | ||
108 | } | ||
109 | |||
93 | return 1; | 110 | return 1; |
94 | } | 111 | } |
95 | 112 | ||
96 | 113 | ||
97 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | 114 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) |
98 | { | 115 | { |
99 | unsigned int n, dummy, ebx, ecx, edx; | 116 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
100 | 117 | ||
101 | n = c->extended_cpuid_level; | 118 | n = c->extended_cpuid_level; |
102 | 119 | ||
103 | if (n >= 0x80000005) { | 120 | if (n >= 0x80000005) { |
104 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | 121 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
105 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), " | 122 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", |
106 | "D cache %dK (%d bytes/line)\n", | 123 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); |
107 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | ||
108 | c->x86_cache_size = (ecx>>24) + (edx>>24); | 124 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
109 | /* On K8 L1 TLB is inclusive, so don't count it */ | 125 | /* On K8 L1 TLB is inclusive, so don't count it */ |
110 | c->x86_tlbsize = 0; | 126 | c->x86_tlbsize = 0; |
111 | } | 127 | } |
112 | 128 | ||
113 | if (n >= 0x80000006) { | 129 | if (n < 0x80000006) /* Some chips just has a large L1. */ |
114 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); | 130 | return; |
115 | ecx = cpuid_ecx(0x80000006); | ||
116 | c->x86_cache_size = ecx >> 16; | ||
117 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
118 | 131 | ||
119 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | 132 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
120 | c->x86_cache_size, ecx & 0xFF); | 133 | l2size = ecx >> 16; |
121 | } | 134 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); |
135 | |||
136 | c->x86_cache_size = l2size; | ||
137 | |||
138 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | ||
139 | l2size, ecx & 0xFF); | ||
122 | } | 140 | } |
123 | 141 | ||
124 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 142 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
@@ -127,14 +145,16 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
127 | u32 eax, ebx, ecx, edx; | 145 | u32 eax, ebx, ecx, edx; |
128 | int index_msb, core_bits; | 146 | int index_msb, core_bits; |
129 | 147 | ||
130 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
131 | |||
132 | |||
133 | if (!cpu_has(c, X86_FEATURE_HT)) | 148 | if (!cpu_has(c, X86_FEATURE_HT)) |
134 | return; | 149 | return; |
135 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) | 150 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
136 | goto out; | 151 | goto out; |
137 | 152 | ||
153 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) | ||
154 | return; | ||
155 | |||
156 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
157 | |||
138 | smp_num_siblings = (ebx & 0xff0000) >> 16; | 158 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
139 | 159 | ||
140 | if (smp_num_siblings == 1) { | 160 | if (smp_num_siblings == 1) { |
@@ -142,8 +162,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
142 | } else if (smp_num_siblings > 1) { | 162 | } else if (smp_num_siblings > 1) { |
143 | 163 | ||
144 | if (smp_num_siblings > NR_CPUS) { | 164 | if (smp_num_siblings > NR_CPUS) { |
145 | printk(KERN_WARNING "CPU: Unsupported number of " | 165 | printk(KERN_WARNING "CPU: Unsupported number of siblings %d", |
146 | "siblings %d", smp_num_siblings); | 166 | smp_num_siblings); |
147 | smp_num_siblings = 1; | 167 | smp_num_siblings = 1; |
148 | return; | 168 | return; |
149 | } | 169 | } |
@@ -160,6 +180,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
160 | c->cpu_core_id = phys_pkg_id(index_msb) & | 180 | c->cpu_core_id = phys_pkg_id(index_msb) & |
161 | ((1 << core_bits) - 1); | 181 | ((1 << core_bits) - 1); |
162 | } | 182 | } |
183 | |||
163 | out: | 184 | out: |
164 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | 185 | if ((c->x86_max_cores * smp_num_siblings) > 1) { |
165 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 186 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", |
@@ -167,7 +188,6 @@ out: | |||
167 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 188 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", |
168 | c->cpu_core_id); | 189 | c->cpu_core_id); |
169 | } | 190 | } |
170 | |||
171 | #endif | 191 | #endif |
172 | } | 192 | } |
173 | 193 | ||
@@ -178,111 +198,70 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | |||
178 | static int printed; | 198 | static int printed; |
179 | 199 | ||
180 | for (i = 0; i < X86_VENDOR_NUM; i++) { | 200 | for (i = 0; i < X86_VENDOR_NUM; i++) { |
181 | if (cpu_devs[i]) { | 201 | if (!cpu_devs[i]) |
182 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | 202 | break; |
183 | (cpu_devs[i]->c_ident[1] && | 203 | |
184 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | 204 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
185 | c->x86_vendor = i; | 205 | (cpu_devs[i]->c_ident[1] && |
186 | this_cpu = cpu_devs[i]; | 206 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
187 | return; | 207 | this_cpu = cpu_devs[i]; |
188 | } | 208 | c->x86_vendor = this_cpu->c_x86_vendor; |
209 | return; | ||
189 | } | 210 | } |
190 | } | 211 | } |
212 | |||
191 | if (!printed) { | 213 | if (!printed) { |
192 | printed++; | 214 | printed++; |
193 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | 215 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); |
194 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | 216 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); |
195 | } | 217 | } |
196 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
197 | } | ||
198 | |||
199 | static void __init early_cpu_support_print(void) | ||
200 | { | ||
201 | int i,j; | ||
202 | struct cpu_dev *cpu_devx; | ||
203 | |||
204 | printk("KERNEL supported cpus:\n"); | ||
205 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
206 | cpu_devx = cpu_devs[i]; | ||
207 | if (!cpu_devx) | ||
208 | continue; | ||
209 | for (j = 0; j < 2; j++) { | ||
210 | if (!cpu_devx->c_ident[j]) | ||
211 | continue; | ||
212 | printk(" %s %s\n", cpu_devx->c_vendor, | ||
213 | cpu_devx->c_ident[j]); | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | |||
218 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); | ||
219 | |||
220 | void __init early_cpu_init(void) | ||
221 | { | ||
222 | struct cpu_vendor_dev *cvdev; | ||
223 | 218 | ||
224 | for (cvdev = __x86cpuvendor_start ; | 219 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
225 | cvdev < __x86cpuvendor_end ; | 220 | this_cpu = &default_cpu; |
226 | cvdev++) | ||
227 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
228 | early_cpu_support_print(); | ||
229 | early_identify_cpu(&boot_cpu_data); | ||
230 | } | 221 | } |
231 | 222 | ||
232 | /* Do some early cpuid on the boot CPU to get some parameter that are | 223 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) |
233 | needed before check_bugs. Everything advanced is in identify_cpu | ||
234 | below. */ | ||
235 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | ||
236 | { | 224 | { |
237 | u32 tfms, xlvl; | ||
238 | |||
239 | c->loops_per_jiffy = loops_per_jiffy; | ||
240 | c->x86_cache_size = -1; | ||
241 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
242 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | ||
243 | c->x86_vendor_id[0] = '\0'; /* Unset */ | ||
244 | c->x86_model_id[0] = '\0'; /* Unset */ | ||
245 | c->x86_clflush_size = 64; | ||
246 | c->x86_cache_alignment = c->x86_clflush_size; | ||
247 | c->x86_max_cores = 1; | ||
248 | c->x86_coreid_bits = 0; | ||
249 | c->extended_cpuid_level = 0; | ||
250 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
251 | |||
252 | /* Get vendor name */ | 225 | /* Get vendor name */ |
253 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 226 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
254 | (unsigned int *)&c->x86_vendor_id[0], | 227 | (unsigned int *)&c->x86_vendor_id[0], |
255 | (unsigned int *)&c->x86_vendor_id[8], | 228 | (unsigned int *)&c->x86_vendor_id[8], |
256 | (unsigned int *)&c->x86_vendor_id[4]); | 229 | (unsigned int *)&c->x86_vendor_id[4]); |
257 | 230 | ||
258 | get_cpu_vendor(c); | 231 | c->x86 = 4; |
259 | |||
260 | /* Initialize the standard set of capabilities */ | ||
261 | /* Note that the vendor-specific code below might override */ | ||
262 | |||
263 | /* Intel-defined flags: level 0x00000001 */ | 232 | /* Intel-defined flags: level 0x00000001 */ |
264 | if (c->cpuid_level >= 0x00000001) { | 233 | if (c->cpuid_level >= 0x00000001) { |
265 | __u32 misc; | 234 | u32 junk, tfms, cap0, misc; |
266 | cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4], | 235 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
267 | &c->x86_capability[0]); | ||
268 | c->x86 = (tfms >> 8) & 0xf; | 236 | c->x86 = (tfms >> 8) & 0xf; |
269 | c->x86_model = (tfms >> 4) & 0xf; | 237 | c->x86_model = (tfms >> 4) & 0xf; |
270 | c->x86_mask = tfms & 0xf; | 238 | c->x86_mask = tfms & 0xf; |
271 | if (c->x86 == 0xf) | 239 | if (c->x86 == 0xf) |
272 | c->x86 += (tfms >> 20) & 0xff; | 240 | c->x86 += (tfms >> 20) & 0xff; |
273 | if (c->x86 >= 0x6) | 241 | if (c->x86 >= 0x6) |
274 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 242 | c->x86_model += ((tfms >> 16) & 0xf) << 4; |
275 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | 243 | if (cap0 & (1<<19)) { |
276 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | 244 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
277 | } else { | 245 | c->x86_cache_alignment = c->x86_clflush_size; |
278 | /* Have CPUID level 0 only - unheard of */ | 246 | } |
279 | c->x86 = 4; | 247 | } |
248 | } | ||
249 | |||
250 | |||
251 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | ||
252 | { | ||
253 | u32 tfms, xlvl; | ||
254 | u32 ebx; | ||
255 | |||
256 | /* Intel-defined flags: level 0x00000001 */ | ||
257 | if (c->cpuid_level >= 0x00000001) { | ||
258 | u32 capability, excap; | ||
259 | |||
260 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | ||
261 | c->x86_capability[0] = capability; | ||
262 | c->x86_capability[4] = excap; | ||
280 | } | 263 | } |
281 | 264 | ||
282 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; | ||
283 | #ifdef CONFIG_SMP | ||
284 | c->phys_proc_id = c->initial_apicid; | ||
285 | #endif | ||
286 | /* AMD-defined flags: level 0x80000001 */ | 265 | /* AMD-defined flags: level 0x80000001 */ |
287 | xlvl = cpuid_eax(0x80000000); | 266 | xlvl = cpuid_eax(0x80000000); |
288 | c->extended_cpuid_level = xlvl; | 267 | c->extended_cpuid_level = xlvl; |
@@ -291,8 +270,6 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
291 | c->x86_capability[1] = cpuid_edx(0x80000001); | 270 | c->x86_capability[1] = cpuid_edx(0x80000001); |
292 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 271 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
293 | } | 272 | } |
294 | if (xlvl >= 0x80000004) | ||
295 | get_model_name(c); /* Default name */ | ||
296 | } | 273 | } |
297 | 274 | ||
298 | /* Transmeta-defined flags: level 0x80860001 */ | 275 | /* Transmeta-defined flags: level 0x80860001 */ |
@@ -312,14 +289,114 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
312 | c->x86_virt_bits = (eax >> 8) & 0xff; | 289 | c->x86_virt_bits = (eax >> 8) & 0xff; |
313 | c->x86_phys_bits = eax & 0xff; | 290 | c->x86_phys_bits = eax & 0xff; |
314 | } | 291 | } |
292 | } | ||
293 | |||
294 | /* Do some early cpuid on the boot CPU to get some parameter that are | ||
295 | needed before check_bugs. Everything advanced is in identify_cpu | ||
296 | below. */ | ||
297 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) | ||
298 | { | ||
299 | |||
300 | c->x86_clflush_size = 64; | ||
301 | c->x86_cache_alignment = c->x86_clflush_size; | ||
302 | |||
303 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
304 | |||
305 | c->extended_cpuid_level = 0; | ||
315 | 306 | ||
316 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 307 | cpu_detect(c); |
317 | cpu_devs[c->x86_vendor]->c_early_init) | 308 | |
318 | cpu_devs[c->x86_vendor]->c_early_init(c); | 309 | get_cpu_vendor(c); |
310 | |||
311 | get_cpu_cap(c); | ||
312 | |||
313 | if (this_cpu->c_early_init) | ||
314 | this_cpu->c_early_init(c); | ||
319 | 315 | ||
320 | validate_pat_support(c); | 316 | validate_pat_support(c); |
321 | } | 317 | } |
322 | 318 | ||
319 | void __init early_cpu_init(void) | ||
320 | { | ||
321 | struct cpu_dev **cdev; | ||
322 | int count = 0; | ||
323 | |||
324 | printk("KERNEL supported cpus:\n"); | ||
325 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | ||
326 | struct cpu_dev *cpudev = *cdev; | ||
327 | unsigned int j; | ||
328 | |||
329 | if (count >= X86_VENDOR_NUM) | ||
330 | break; | ||
331 | cpu_devs[count] = cpudev; | ||
332 | count++; | ||
333 | |||
334 | for (j = 0; j < 2; j++) { | ||
335 | if (!cpudev->c_ident[j]) | ||
336 | continue; | ||
337 | printk(" %s %s\n", cpudev->c_vendor, | ||
338 | cpudev->c_ident[j]); | ||
339 | } | ||
340 | } | ||
341 | |||
342 | early_identify_cpu(&boot_cpu_data); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * The NOPL instruction is supposed to exist on all CPUs with | ||
347 | * family >= 6, unfortunately, that's not true in practice because | ||
348 | * of early VIA chips and (more importantly) broken virtualizers that | ||
349 | * are not easy to detect. Hence, probe for it based on first | ||
350 | * principles. | ||
351 | * | ||
352 | * Note: no 64-bit chip is known to lack these, but put the code here | ||
353 | * for consistency with 32 bits, and to make it utterly trivial to | ||
354 | * diagnose the problem should it ever surface. | ||
355 | */ | ||
356 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
357 | { | ||
358 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
359 | u32 has_nopl = nopl_signature; | ||
360 | |||
361 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
362 | if (c->x86 >= 6) { | ||
363 | asm volatile("\n" | ||
364 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
365 | "2:\n" | ||
366 | " .section .fixup,\"ax\"\n" | ||
367 | "3: xor %0,%0\n" | ||
368 | " jmp 2b\n" | ||
369 | " .previous\n" | ||
370 | _ASM_EXTABLE(1b,3b) | ||
371 | : "+a" (has_nopl)); | ||
372 | |||
373 | if (has_nopl == nopl_signature) | ||
374 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
375 | } | ||
376 | } | ||
377 | |||
378 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | ||
379 | { | ||
380 | c->extended_cpuid_level = 0; | ||
381 | |||
382 | cpu_detect(c); | ||
383 | |||
384 | get_cpu_vendor(c); | ||
385 | |||
386 | get_cpu_cap(c); | ||
387 | |||
388 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; | ||
389 | #ifdef CONFIG_SMP | ||
390 | c->phys_proc_id = c->initial_apicid; | ||
391 | #endif | ||
392 | |||
393 | if (c->extended_cpuid_level >= 0x80000004) | ||
394 | get_model_name(c); /* Default name */ | ||
395 | |||
396 | init_scattered_cpuid_features(c); | ||
397 | detect_nopl(c); | ||
398 | } | ||
399 | |||
323 | /* | 400 | /* |
324 | * This does the hard work of actually picking apart the CPU stuff... | 401 | * This does the hard work of actually picking apart the CPU stuff... |
325 | */ | 402 | */ |
@@ -327,9 +404,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
327 | { | 404 | { |
328 | int i; | 405 | int i; |
329 | 406 | ||
330 | early_identify_cpu(c); | 407 | c->loops_per_jiffy = loops_per_jiffy; |
408 | c->x86_cache_size = -1; | ||
409 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
410 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | ||
411 | c->x86_vendor_id[0] = '\0'; /* Unset */ | ||
412 | c->x86_model_id[0] = '\0'; /* Unset */ | ||
413 | c->x86_max_cores = 1; | ||
414 | c->x86_coreid_bits = 0; | ||
415 | c->x86_clflush_size = 64; | ||
416 | c->x86_cache_alignment = c->x86_clflush_size; | ||
417 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
331 | 418 | ||
332 | init_scattered_cpuid_features(c); | 419 | generic_identify(c); |
333 | 420 | ||
334 | c->apicid = phys_pkg_id(0); | 421 | c->apicid = phys_pkg_id(0); |
335 | 422 | ||
@@ -375,7 +462,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
375 | 462 | ||
376 | } | 463 | } |
377 | 464 | ||
378 | void __cpuinit identify_boot_cpu(void) | 465 | void __init identify_boot_cpu(void) |
379 | { | 466 | { |
380 | identify_cpu(&boot_cpu_data); | 467 | identify_cpu(&boot_cpu_data); |
381 | } | 468 | } |
@@ -387,6 +474,49 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | |||
387 | mtrr_ap_init(); | 474 | mtrr_ap_init(); |
388 | } | 475 | } |
389 | 476 | ||
477 | struct msr_range { | ||
478 | unsigned min; | ||
479 | unsigned max; | ||
480 | }; | ||
481 | |||
482 | static struct msr_range msr_range_array[] __cpuinitdata = { | ||
483 | { 0x00000000, 0x00000418}, | ||
484 | { 0xc0000000, 0xc000040b}, | ||
485 | { 0xc0010000, 0xc0010142}, | ||
486 | { 0xc0011000, 0xc001103b}, | ||
487 | }; | ||
488 | |||
489 | static void __cpuinit print_cpu_msr(void) | ||
490 | { | ||
491 | unsigned index; | ||
492 | u64 val; | ||
493 | int i; | ||
494 | unsigned index_min, index_max; | ||
495 | |||
496 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | ||
497 | index_min = msr_range_array[i].min; | ||
498 | index_max = msr_range_array[i].max; | ||
499 | for (index = index_min; index < index_max; index++) { | ||
500 | if (rdmsrl_amd_safe(index, &val)) | ||
501 | continue; | ||
502 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | ||
503 | } | ||
504 | } | ||
505 | } | ||
506 | |||
507 | static int show_msr __cpuinitdata; | ||
508 | static __init int setup_show_msr(char *arg) | ||
509 | { | ||
510 | int num; | ||
511 | |||
512 | get_option(&arg, &num); | ||
513 | |||
514 | if (num > 0) | ||
515 | show_msr = num; | ||
516 | return 1; | ||
517 | } | ||
518 | __setup("show_msr=", setup_show_msr); | ||
519 | |||
390 | static __init int setup_noclflush(char *arg) | 520 | static __init int setup_noclflush(char *arg) |
391 | { | 521 | { |
392 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | 522 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); |
@@ -403,6 +533,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
403 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | 533 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
404 | else | 534 | else |
405 | printk(KERN_CONT "\n"); | 535 | printk(KERN_CONT "\n"); |
536 | |||
537 | #ifdef CONFIG_SMP | ||
538 | if (c->cpu_index < show_msr) | ||
539 | print_cpu_msr(); | ||
540 | #else | ||
541 | if (show_msr) | ||
542 | print_cpu_msr(); | ||
543 | #endif | ||
406 | } | 544 | } |
407 | 545 | ||
408 | static __init int setup_disablecpuid(char *arg) | 546 | static __init int setup_disablecpuid(char *arg) |
@@ -493,17 +631,20 @@ void pda_init(int cpu) | |||
493 | /* others are initialized in smpboot.c */ | 631 | /* others are initialized in smpboot.c */ |
494 | pda->pcurrent = &init_task; | 632 | pda->pcurrent = &init_task; |
495 | pda->irqstackptr = boot_cpu_stack; | 633 | pda->irqstackptr = boot_cpu_stack; |
634 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
496 | } else { | 635 | } else { |
497 | pda->irqstackptr = (char *) | 636 | if (!pda->irqstackptr) { |
498 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | 637 | pda->irqstackptr = (char *) |
499 | if (!pda->irqstackptr) | 638 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); |
500 | panic("cannot allocate irqstack for cpu %d", cpu); | 639 | if (!pda->irqstackptr) |
640 | panic("cannot allocate irqstack for cpu %d", | ||
641 | cpu); | ||
642 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
643 | } | ||
501 | 644 | ||
502 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | 645 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) |
503 | pda->nodenumber = cpu_to_node(cpu); | 646 | pda->nodenumber = cpu_to_node(cpu); |
504 | } | 647 | } |
505 | |||
506 | pda->irqstackptr += IRQSTACKSIZE-64; | ||
507 | } | 648 | } |
508 | 649 | ||
509 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | 650 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + |
@@ -597,23 +738,28 @@ void __cpuinit cpu_init(void) | |||
597 | barrier(); | 738 | barrier(); |
598 | 739 | ||
599 | check_efer(); | 740 | check_efer(); |
741 | if (cpu != 0 && x2apic) | ||
742 | enable_x2apic(); | ||
600 | 743 | ||
601 | /* | 744 | /* |
602 | * set up and load the per-CPU TSS | 745 | * set up and load the per-CPU TSS |
603 | */ | 746 | */ |
604 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 747 | if (!orig_ist->ist[0]) { |
605 | static const unsigned int order[N_EXCEPTION_STACKS] = { | 748 | static const unsigned int order[N_EXCEPTION_STACKS] = { |
606 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | 749 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, |
607 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | 750 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER |
608 | }; | 751 | }; |
609 | if (cpu) { | 752 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
610 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | 753 | if (cpu) { |
611 | if (!estacks) | 754 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); |
612 | panic("Cannot allocate exception stack %ld %d\n", | 755 | if (!estacks) |
613 | v, cpu); | 756 | panic("Cannot allocate exception " |
757 | "stack %ld %d\n", v, cpu); | ||
758 | } | ||
759 | estacks += PAGE_SIZE << order[v]; | ||
760 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
761 | (unsigned long)estacks; | ||
614 | } | 762 | } |
615 | estacks += PAGE_SIZE << order[v]; | ||
616 | orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks; | ||
617 | } | 763 | } |
618 | 764 | ||
619 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | 765 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
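
The common_64.c hunks above add a boot-time MSR dump gated by the show_msr= parameter and make pda_init()/cpu_init() safe to re-run for CPU hotplug (irq stacks and exception stacks are allocated only once). For reference, the same range walk can be reproduced from user space; this is a minimal sketch assuming the standard /dev/cpu/N/msr character device, where a pread() at an offset equal to the MSR index returns the 64-bit value and reads of unimplemented MSRs simply fail, much like rdmsrl_amd_safe():

/* Hypothetical user-space analogue of print_cpu_msr(): walk the same MSR
 * ranges via /dev/cpu/0/msr.  Requires root and the msr driver; failed
 * reads (unimplemented MSRs) are skipped, mirroring the _safe variant. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct msr_range { unsigned min, max; };

static const struct msr_range ranges[] = {
	{ 0x00000000, 0x00000418 },
	{ 0xc0000000, 0xc000040b },
	{ 0xc0010000, 0xc0010142 },
	{ 0xc0011000, 0xc001103b },
};

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	for (unsigned i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		for (unsigned idx = ranges[i].min; idx < ranges[i].max; idx++) {
			uint64_t val;
			if (pread(fd, &val, sizeof(val), idx) != sizeof(val))
				continue;	/* unimplemented MSR: skip */
			printf(" MSR%08x: %016llx\n", idx,
			       (unsigned long long)val);
		}
	}
	close(fd);
	return 0;
}
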
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 4d894e8565fe..3cc9d92afd8f 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -21,21 +21,15 @@ struct cpu_dev { | |||
21 | void (*c_init)(struct cpuinfo_x86 * c); | 21 | void (*c_init)(struct cpuinfo_x86 * c); |
22 | void (*c_identify)(struct cpuinfo_x86 * c); | 22 | void (*c_identify)(struct cpuinfo_x86 * c); |
23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); | 23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); |
24 | int c_x86_vendor; | ||
24 | }; | 25 | }; |
25 | 26 | ||
26 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; | 27 | #define cpu_dev_register(cpu_devX) \ |
28 | static struct cpu_dev *__cpu_dev_##cpu_devX __used \ | ||
29 | __attribute__((__section__(".x86_cpu_dev.init"))) = \ | ||
30 | &cpu_devX; | ||
27 | 31 | ||
28 | struct cpu_vendor_dev { | 32 | extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[]; |
29 | int vendor; | ||
30 | struct cpu_dev *cpu_dev; | ||
31 | }; | ||
32 | |||
33 | #define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \ | ||
34 | static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \ | ||
35 | __attribute__((__section__(".x86cpuvendor.init"))) = \ | ||
36 | { cpu_vendor_id, cpu_dev } | ||
37 | |||
38 | extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[]; | ||
39 | 33 | ||
40 | extern int get_model_name(struct cpuinfo_x86 *c); | 34 | extern int get_model_name(struct cpuinfo_x86 *c); |
41 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 35 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
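
cpu.h replaces the cpu_vendor_dev table with a per-vendor c_x86_vendor field plus a cpu_dev_register() macro that drops a pointer into a dedicated .x86_cpu_dev.init section, bounded by __x86_cpu_dev_start/__x86_cpu_dev_end from the linker script. A self-contained sketch of that section-array pattern follows; it leans on the __start_/__stop_ symbols GNU ld generates automatically for C-identifier section names instead of explicit linker-script bounds, so the section name differs from the kernel's:

/* Sketch of the section-array registration pattern behind cpu_dev_register():
 * each translation unit places a pointer into a named section and the
 * consumer iterates between the section's start and stop symbols. */
#include <stdio.h>

struct cpu_dev {
	const char *c_vendor;
	void (*c_init)(void);
};

#define cpu_dev_register(dev) \
	static const struct cpu_dev *__cpu_dev_##dev \
	__attribute__((used, section("x86_cpu_dev_init"))) = &dev

static void demo_init(void) { puts("demo init"); }

static const struct cpu_dev demo_cpu_dev = {
	.c_vendor = "Demo",
	.c_init   = demo_init,
};
cpu_dev_register(demo_cpu_dev);

/* Provided by GNU ld for sections whose names are valid C identifiers. */
extern const struct cpu_dev *__start_x86_cpu_dev_init[];
extern const struct cpu_dev *__stop_x86_cpu_dev_init[];

int main(void)
{
	for (const struct cpu_dev **p = __start_x86_cpu_dev_init;
	     p < __stop_x86_cpu_dev_init; p++) {
		printf("registered vendor: %s\n", (*p)->c_vendor);
		(*p)->c_init();
	}
	return 0;
}
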
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index e710a21bb6e8..3f8c7283d816 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -15,13 +15,11 @@ | |||
15 | /* | 15 | /* |
16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU | 16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
17 | */ | 17 | */ |
18 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 18 | static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
19 | { | 19 | { |
20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
21 | unsigned long flags; | ||
22 | 21 | ||
23 | /* we test for DEVID by checking whether CCR3 is writable */ | 22 | /* we test for DEVID by checking whether CCR3 is writable */ |
24 | local_irq_save(flags); | ||
25 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
26 | setCx86(CX86_CCR3, ccr3 ^ 0x80); | 24 | setCx86(CX86_CCR3, ccr3 ^ 0x80); |
27 | getCx86(0xc0); /* dummy to change bus */ | 25 | getCx86(0xc0); /* dummy to change bus */ |
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
44 | *dir0 = getCx86(CX86_DIR0); | 42 | *dir0 = getCx86(CX86_DIR0); |
45 | *dir1 = getCx86(CX86_DIR1); | 43 | *dir1 = getCx86(CX86_DIR1); |
46 | } | 44 | } |
47 | local_irq_restore(flags); | ||
48 | } | 45 | } |
49 | 46 | ||
47 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | |||
51 | local_irq_save(flags); | ||
52 | __do_cyrix_devid(dir0, dir1); | ||
53 | local_irq_restore(flags); | ||
54 | } | ||
50 | /* | 55 | /* |
51 | * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in | 56 | * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in |
52 | * order to identify the Cyrix CPU model after we're out of setup.c | 57 | * order to identify the Cyrix CPU model after we're out of setup.c |
@@ -116,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void) | |||
116 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
117 | 122 | ||
118 | /* Load/Store Serialize to mem access disable (=reorder it) */ | 123 | /* Load/Store Serialize to mem access disable (=reorder it) */ |
119 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); | 124 | setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); |
120 | /* set load/store serialize from 1GB to 4GB */ | 125 | /* set load/store serialize from 1GB to 4GB */ |
121 | ccr3 |= 0xe0; | 126 | ccr3 |= 0xe0; |
122 | setCx86(CX86_CCR3, ccr3); | 127 | setCx86(CX86_CCR3, ccr3); |
@@ -127,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void) | |||
127 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
128 | 133 | ||
129 | /* CCR2 bit 2: unlock NW bit */ | 134 | /* CCR2 bit 2: unlock NW bit */ |
130 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); | 135 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); |
131 | /* set 'Not Write-through' */ | 136 | /* set 'Not Write-through' */ |
132 | write_cr0(read_cr0() | X86_CR0_NW); | 137 | write_cr0(read_cr0() | X86_CR0_NW); |
133 | /* CCR2 bit 2: lock NW bit and set WT1 */ | 138 | /* CCR2 bit 2: lock NW bit and set WT1 */ |
134 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); | 139 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); |
135 | } | 140 | } |
136 | 141 | ||
137 | /* | 142 | /* |
@@ -145,14 +150,14 @@ static void __cpuinit geode_configure(void) | |||
145 | local_irq_save(flags); | 150 | local_irq_save(flags); |
146 | 151 | ||
147 | /* Suspend on halt power saving and enable #SUSP pin */ | 152 | /* Suspend on halt power saving and enable #SUSP pin */ |
148 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); | 153 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); |
149 | 154 | ||
150 | ccr3 = getCx86(CX86_CCR3); | 155 | ccr3 = getCx86(CX86_CCR3); |
151 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
152 | 157 | ||
153 | 158 | ||
154 | /* FPU fast, DTE cache, Mem bypass */ | 159 | /* FPU fast, DTE cache, Mem bypass */ |
155 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); | 160 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); |
156 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
157 | 162 | ||
158 | set_cx86_memwb(); | 163 | set_cx86_memwb(); |
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void) | |||
161 | local_irq_restore(flags); | 166 | local_irq_restore(flags); |
162 | } | 167 | } |
163 | 168 | ||
169 | static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | ||
170 | { | ||
171 | unsigned char dir0, dir0_msn, dir1 = 0; | ||
172 | |||
173 | __do_cyrix_devid(&dir0, &dir1); | ||
174 | dir0_msn = dir0 >> 4; /* identifies CPU "family" */ | ||
175 | |||
176 | switch (dir0_msn) { | ||
177 | case 3: /* 6x86/6x86L */ | ||
178 | /* Emulate MTRRs using Cyrix's ARRs. */ | ||
179 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | ||
180 | break; | ||
181 | case 5: /* 6x86MX/M II */ | ||
182 | /* Emulate MTRRs using Cyrix's ARRs. */ | ||
183 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | ||
184 | break; | ||
185 | } | ||
186 | } | ||
164 | 187 | ||
165 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | 188 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) |
166 | { | 189 | { |
@@ -268,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
268 | /* GXm supports extended cpuid levels 'ala' AMD */ | 291 | /* GXm supports extended cpuid levels 'ala' AMD */ |
269 | if (c->cpuid_level == 2) { | 292 | if (c->cpuid_level == 2) { |
270 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ | 293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ |
271 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); | 294 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); |
272 | 295 | ||
273 | /* | 296 | /* |
274 | * GXm : 0x30 ... 0x5f GXm datasheet 51 | 297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 |
@@ -291,7 +314,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
291 | if (dir1 > 7) { | 314 | if (dir1 > 7) { |
292 | dir0_msn++; /* M II */ | 315 | dir0_msn++; /* M II */ |
293 | /* Enable MMX extensions (App note 108) */ | 316 | /* Enable MMX extensions (App note 108) */ |
294 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); | 317 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); |
295 | } else { | 318 | } else { |
296 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ | 319 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ |
297 | } | 320 | } |
@@ -406,7 +429,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
406 | local_irq_save(flags); | 429 | local_irq_save(flags); |
407 | ccr3 = getCx86(CX86_CCR3); | 430 | ccr3 = getCx86(CX86_CCR3); |
408 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 431 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
409 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */ | 432 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ |
410 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 433 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
411 | local_irq_restore(flags); | 434 | local_irq_restore(flags); |
412 | } | 435 | } |
@@ -416,16 +439,19 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
416 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | 439 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { |
417 | .c_vendor = "Cyrix", | 440 | .c_vendor = "Cyrix", |
418 | .c_ident = { "CyrixInstead" }, | 441 | .c_ident = { "CyrixInstead" }, |
442 | .c_early_init = early_init_cyrix, | ||
419 | .c_init = init_cyrix, | 443 | .c_init = init_cyrix, |
420 | .c_identify = cyrix_identify, | 444 | .c_identify = cyrix_identify, |
445 | .c_x86_vendor = X86_VENDOR_CYRIX, | ||
421 | }; | 446 | }; |
422 | 447 | ||
423 | cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); | 448 | cpu_dev_register(cyrix_cpu_dev); |
424 | 449 | ||
425 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { | 450 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { |
426 | .c_vendor = "NSC", | 451 | .c_vendor = "NSC", |
427 | .c_ident = { "Geode by NSC" }, | 452 | .c_ident = { "Geode by NSC" }, |
428 | .c_init = init_nsc, | 453 | .c_init = init_nsc, |
454 | .c_x86_vendor = X86_VENDOR_NSC, | ||
429 | }; | 455 | }; |
430 | 456 | ||
431 | cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev); | 457 | cpu_dev_register(nsc_cpu_dev); |
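
The Cyrix changes split the DEVID probe so early_init_cyrix() can run before interrupt handling is set up, and move byte-sized configuration register accesses to the setCx86_old()/getCx86_old() helpers. Those helpers ultimately use the classic indexed I/O pair at ports 0x22/0x23; a user-space sketch of that access pattern, assuming iopl() privileges and real Cyrix/NSC hardware, looks like:

/* Sketch of the indexed-port access behind getCx86()/setCx86(): write the
 * register index to port 0x22, then read or write the data at port 0x23. */
#include <stdio.h>
#include <sys/io.h>

static unsigned char get_cx86(unsigned char reg)
{
	outb(reg, 0x22);
	return inb(0x23);
}

static void set_cx86(unsigned char reg, unsigned char data)
{
	outb(reg, 0x22);
	outb(data, 0x23);
}

int main(void)
{
	if (iopl(3)) {		/* need I/O privilege for ports 0x22/0x23 */
		perror("iopl");
		return 1;
	}
	/* Read CCR3 (register index 0xc3 in the Cyrix register space). */
	printf("CCR3 = %#x\n", get_cx86(0xc3));
	(void)set_cx86;		/* writes omitted in this read-only demo */
	return 0;
}
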
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c deleted file mode 100644 index e43ad4ad4cba..000000000000 --- a/arch/x86/kernel/cpu/feature_names.c +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * Strings for the various x86 capability flags. | ||
3 | * | ||
4 | * This file must not contain any executable code. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cpufeature.h> | ||
8 | |||
9 | /* | ||
10 | * These flag bits must match the definitions in <asm/cpufeature.h>. | ||
11 | * NULL means this bit is undefined or reserved; either way it doesn't | ||
12 | * have meaning as far as Linux is concerned. Note that it's important | ||
13 | * to realize there is a difference between this table and CPUID -- if | ||
14 | * applications want to get the raw CPUID data, they should access | ||
15 | * /dev/cpu/<cpu_nr>/cpuid instead. | ||
16 | */ | ||
17 | const char * const x86_cap_flags[NCAPINTS*32] = { | ||
18 | /* Intel-defined */ | ||
19 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", | ||
20 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", | ||
21 | "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", | ||
22 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", | ||
23 | |||
24 | /* AMD-defined */ | ||
25 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
26 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | ||
27 | NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, | ||
28 | NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", | ||
29 | "3dnowext", "3dnow", | ||
30 | |||
31 | /* Transmeta-defined */ | ||
32 | "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, | ||
33 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
34 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
35 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
36 | |||
37 | /* Other (Linux-defined) */ | ||
38 | "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", | ||
39 | NULL, NULL, NULL, NULL, | ||
40 | "constant_tsc", "up", NULL, "arch_perfmon", | ||
41 | "pebs", "bts", NULL, NULL, | ||
42 | "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
43 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
44 | |||
45 | /* Intel-defined (#2) */ | ||
46 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", | ||
47 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, | ||
48 | NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", | ||
49 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
50 | |||
51 | /* VIA/Cyrix/Centaur-defined */ | ||
52 | NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", | ||
53 | "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, | ||
54 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
55 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
56 | |||
57 | /* AMD-defined (#2) */ | ||
58 | "lahf_lm", "cmp_legacy", "svm", "extapic", | ||
59 | "cr8_legacy", "abm", "sse4a", "misalignsse", | ||
60 | "3dnowprefetch", "osvw", "ibs", "sse5", | ||
61 | "skinit", "wdt", NULL, NULL, | ||
62 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
63 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
64 | |||
65 | /* Auxiliary (Linux-defined) */ | ||
66 | "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
67 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
68 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
69 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
70 | }; | ||
71 | |||
72 | const char *const x86_power_flags[32] = { | ||
73 | "ts", /* temperature sensor */ | ||
74 | "fid", /* frequency id control */ | ||
75 | "vid", /* voltage id control */ | ||
76 | "ttp", /* thermal trip */ | ||
77 | "tm", | ||
78 | "stc", | ||
79 | "100mhzsteps", | ||
80 | "hwpstate", | ||
81 | "", /* tsc invariant mapped to constant_tsc */ | ||
82 | /* nothing */ | ||
83 | }; | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b75f2569b8f8..959417b8cd64 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -23,13 +23,6 @@ | |||
23 | #include <mach_apic.h> | 23 | #include <mach_apic.h> |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
27 | /* | ||
28 | * Alignment at which movsl is preferred for bulk memory copies. | ||
29 | */ | ||
30 | struct movsl_mask movsl_mask __read_mostly; | ||
31 | #endif | ||
32 | |||
33 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 26 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
34 | { | 27 | { |
35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | 28 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ |
@@ -183,9 +176,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
183 | if (p) | 176 | if (p) |
184 | strcpy(c->x86_model_id, p); | 177 | strcpy(c->x86_model_id, p); |
185 | 178 | ||
186 | c->x86_max_cores = num_cpu_cores(c); | 179 | detect_extended_topology(c); |
187 | 180 | ||
188 | detect_ht(c); | 181 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { |
182 | /* | ||
183 | * let's use the legacy cpuid vector 0x1 and 0x4 for topology | ||
184 | * detection. | ||
185 | */ | ||
186 | c->x86_max_cores = num_cpu_cores(c); | ||
187 | detect_ht(c); | ||
188 | } | ||
189 | 189 | ||
190 | /* Work around errata */ | 190 | /* Work around errata */ |
191 | Intel_errata_workarounds(c); | 191 | Intel_errata_workarounds(c); |
@@ -310,73 +310,10 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
310 | .c_early_init = early_init_intel, | 310 | .c_early_init = early_init_intel, |
311 | .c_init = init_intel, | 311 | .c_init = init_intel, |
312 | .c_size_cache = intel_size_cache, | 312 | .c_size_cache = intel_size_cache, |
313 | .c_x86_vendor = X86_VENDOR_INTEL, | ||
313 | }; | 314 | }; |
314 | 315 | ||
315 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | 316 | cpu_dev_register(intel_cpu_dev); |
316 | |||
317 | #ifndef CONFIG_X86_CMPXCHG | ||
318 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
319 | { | ||
320 | u8 prev; | ||
321 | unsigned long flags; | ||
322 | |||
323 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
324 | local_irq_save(flags); | ||
325 | prev = *(u8 *)ptr; | ||
326 | if (prev == old) | ||
327 | *(u8 *)ptr = new; | ||
328 | local_irq_restore(flags); | ||
329 | return prev; | ||
330 | } | ||
331 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
332 | |||
333 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
334 | { | ||
335 | u16 prev; | ||
336 | unsigned long flags; | ||
337 | |||
338 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
339 | local_irq_save(flags); | ||
340 | prev = *(u16 *)ptr; | ||
341 | if (prev == old) | ||
342 | *(u16 *)ptr = new; | ||
343 | local_irq_restore(flags); | ||
344 | return prev; | ||
345 | } | ||
346 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
347 | |||
348 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
349 | { | ||
350 | u32 prev; | ||
351 | unsigned long flags; | ||
352 | |||
353 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
354 | local_irq_save(flags); | ||
355 | prev = *(u32 *)ptr; | ||
356 | if (prev == old) | ||
357 | *(u32 *)ptr = new; | ||
358 | local_irq_restore(flags); | ||
359 | return prev; | ||
360 | } | ||
361 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
362 | #endif | ||
363 | |||
364 | #ifndef CONFIG_X86_CMPXCHG64 | ||
365 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
366 | { | ||
367 | u64 prev; | ||
368 | unsigned long flags; | ||
369 | |||
370 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
371 | local_irq_save(flags); | ||
372 | prev = *(u64 *)ptr; | ||
373 | if (prev == old) | ||
374 | *(u64 *)ptr = new; | ||
375 | local_irq_restore(flags); | ||
376 | return prev; | ||
377 | } | ||
378 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
379 | #endif | ||
380 | 317 | ||
381 | /* arch_initcall(intel_cpu_init); */ | 318 | /* arch_initcall(intel_cpu_init); */ |
382 | 319 | ||
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c index 1019c58d39f0..0c0a58dfe099 100644 --- a/arch/x86/kernel/cpu/intel_64.c +++ b/arch/x86/kernel/cpu/intel_64.c | |||
@@ -80,7 +80,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
80 | if (c->x86 == 6) | 80 | if (c->x86 == 6) |
81 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 81 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
82 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | 82 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
83 | c->x86_max_cores = intel_num_cpu_cores(c); | 83 | |
84 | detect_extended_topology(c); | ||
85 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) | ||
86 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
84 | 87 | ||
85 | srat_detect_node(); | 88 | srat_detect_node(); |
86 | } | 89 | } |
@@ -90,6 +93,7 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
90 | .c_ident = { "GenuineIntel" }, | 93 | .c_ident = { "GenuineIntel" }, |
91 | .c_early_init = early_init_intel, | 94 | .c_early_init = early_init_intel, |
92 | .c_init = init_intel, | 95 | .c_init = init_intel, |
96 | .c_x86_vendor = X86_VENDOR_INTEL, | ||
93 | }; | 97 | }; |
94 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | ||
95 | 98 | ||
99 | cpu_dev_register(intel_cpu_dev); | ||
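
Both the 32-bit and 64-bit Intel init paths now try detect_extended_topology() first and only fall back to the legacy CPUID 1/4 core counting when X86_FEATURE_XTOPOLOGY is absent. The extended enumeration comes from CPUID leaf 0xB; a user-space sketch of walking its sub-leaves, assuming a compiler that provides __get_cpuid_count in <cpuid.h>, is:

/* Sketch of extended-topology enumeration via CPUID leaf 0xB: each sub-leaf
 * reports a level type (1 = SMT, 2 = core), a bit-shift for the x2APIC ID,
 * and the number of logical processors at that level. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	for (unsigned level = 0; ; level++) {
		if (!__get_cpuid_count(0x0b, level, &eax, &ebx, &ecx, &edx))
			break;			/* leaf 0xB not supported */
		if (!(ebx & 0xffff))
			break;			/* no processors at this level: done */
		printf("level %u: type %u, shift %u, logical cpus %u, x2apic id %#x\n",
		       level, (ecx >> 8) & 0xff, eax & 0x1f,
		       ebx & 0xffff, edx);
	}
	return 0;
}
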
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6b0a10b002f1..3f46afbb1cf1 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Routines to identify caches on Intel CPU. | 2 | * Routines to identify caches on Intel CPU. |
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/pci.h> | ||
16 | 17 | ||
17 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
@@ -130,9 +131,18 @@ struct _cpuid4_info { | |||
130 | union _cpuid4_leaf_ebx ebx; | 131 | union _cpuid4_leaf_ebx ebx; |
131 | union _cpuid4_leaf_ecx ecx; | 132 | union _cpuid4_leaf_ecx ecx; |
132 | unsigned long size; | 133 | unsigned long size; |
134 | unsigned long can_disable; | ||
133 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ | 135 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ |
134 | }; | 136 | }; |
135 | 137 | ||
138 | #ifdef CONFIG_PCI | ||
139 | static struct pci_device_id k8_nb_id[] = { | ||
140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | ||
141 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | ||
142 | {} | ||
143 | }; | ||
144 | #endif | ||
145 | |||
136 | unsigned short num_cache_leaves; | 146 | unsigned short num_cache_leaves; |
137 | 147 | ||
138 | /* AMD doesn't have CPUID4. Emulate it here to report the same | 148 | /* AMD doesn't have CPUID4. Emulate it here to report the same |
@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = { | |||
182 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; | 192 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; |
183 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; | 193 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; |
184 | 194 | ||
185 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 195 | static void __cpuinit |
186 | union _cpuid4_leaf_ebx *ebx, | 196 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
187 | union _cpuid4_leaf_ecx *ecx) | 197 | union _cpuid4_leaf_ebx *ebx, |
198 | union _cpuid4_leaf_ecx *ecx) | ||
188 | { | 199 | { |
189 | unsigned dummy; | 200 | unsigned dummy; |
190 | unsigned line_size, lines_per_tag, assoc, size_in_kb; | 201 | unsigned line_size, lines_per_tag, assoc, size_in_kb; |
@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
251 | (ebx->split.ways_of_associativity + 1) - 1; | 262 | (ebx->split.ways_of_associativity + 1) - 1; |
252 | } | 263 | } |
253 | 264 | ||
254 | static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 265 | static void __cpuinit |
266 | amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | ||
267 | { | ||
268 | if (index < 3) | ||
269 | return; | ||
270 | this_leaf->can_disable = 1; | ||
271 | } | ||
272 | |||
273 | static int | ||
274 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
255 | { | 275 | { |
256 | union _cpuid4_leaf_eax eax; | 276 | union _cpuid4_leaf_eax eax; |
257 | union _cpuid4_leaf_ebx ebx; | 277 | union _cpuid4_leaf_ebx ebx; |
258 | union _cpuid4_leaf_ecx ecx; | 278 | union _cpuid4_leaf_ecx ecx; |
259 | unsigned edx; | 279 | unsigned edx; |
260 | 280 | ||
261 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | 281 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
262 | amd_cpuid4(index, &eax, &ebx, &ecx); | 282 | amd_cpuid4(index, &eax, &ebx, &ecx); |
263 | else | 283 | if (boot_cpu_data.x86 >= 0x10) |
264 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | 284 | amd_check_l3_disable(index, this_leaf); |
285 | } else { | ||
286 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | ||
287 | } | ||
288 | |||
265 | if (eax.split.type == CACHE_TYPE_NULL) | 289 | if (eax.split.type == CACHE_TYPE_NULL) |
266 | return -EIO; /* better error ? */ | 290 | return -EIO; /* better error ? */ |
267 | 291 | ||
268 | this_leaf->eax = eax; | 292 | this_leaf->eax = eax; |
269 | this_leaf->ebx = ebx; | 293 | this_leaf->ebx = ebx; |
270 | this_leaf->ecx = ecx; | 294 | this_leaf->ecx = ecx; |
271 | this_leaf->size = (ecx.split.number_of_sets + 1) * | 295 | this_leaf->size = (ecx.split.number_of_sets + 1) * |
272 | (ebx.split.coherency_line_size + 1) * | 296 | (ebx.split.coherency_line_size + 1) * |
273 | (ebx.split.physical_line_partition + 1) * | 297 | (ebx.split.physical_line_partition + 1) * |
274 | (ebx.split.ways_of_associativity + 1); | 298 | (ebx.split.ways_of_associativity + 1); |
275 | return 0; | 299 | return 0; |
276 | } | 300 | } |
277 | 301 | ||
@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
453 | 477 | ||
454 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 478 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
455 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 479 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
456 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 480 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
457 | 481 | ||
458 | #ifdef CONFIG_SMP | 482 | #ifdef CONFIG_SMP |
459 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 483 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
490 | 514 | ||
491 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 515 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
492 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { | 516 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { |
493 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 517 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
494 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 518 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); |
495 | } | 519 | } |
496 | } | 520 | } |
@@ -572,7 +596,7 @@ struct _index_kobject { | |||
572 | 596 | ||
573 | /* pointer to array of kobjects for cpuX/cache/indexY */ | 597 | /* pointer to array of kobjects for cpuX/cache/indexY */ |
574 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | 598 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); |
575 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) | 599 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) |
576 | 600 | ||
577 | #define show_one_plus(file_name, object, val) \ | 601 | #define show_one_plus(file_name, object, val) \ |
578 | static ssize_t show_##file_name \ | 602 | static ssize_t show_##file_name \ |
@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) { | |||
637 | } | 661 | } |
638 | } | 662 | } |
639 | 663 | ||
664 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
665 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
666 | |||
667 | #ifdef CONFIG_PCI | ||
668 | static struct pci_dev *get_k8_northbridge(int node) | ||
669 | { | ||
670 | struct pci_dev *dev = NULL; | ||
671 | int i; | ||
672 | |||
673 | for (i = 0; i <= node; i++) { | ||
674 | do { | ||
675 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
676 | if (!dev) | ||
677 | break; | ||
678 | } while (!pci_match_id(&k8_nb_id[0], dev)); | ||
679 | if (!dev) | ||
680 | break; | ||
681 | } | ||
682 | return dev; | ||
683 | } | ||
684 | #else | ||
685 | static struct pci_dev *get_k8_northbridge(int node) | ||
686 | { | ||
687 | return NULL; | ||
688 | } | ||
689 | #endif | ||
690 | |||
691 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) | ||
692 | { | ||
693 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
694 | struct pci_dev *dev = NULL; | ||
695 | ssize_t ret = 0; | ||
696 | int i; | ||
697 | |||
698 | if (!this_leaf->can_disable) | ||
699 | return sprintf(buf, "Feature not enabled\n"); | ||
700 | |||
701 | dev = get_k8_northbridge(node); | ||
702 | if (!dev) { | ||
703 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
704 | return -EINVAL; | ||
705 | } | ||
706 | |||
707 | for (i = 0; i < 2; i++) { | ||
708 | unsigned int reg; | ||
709 | |||
710 | pci_read_config_dword(dev, 0x1BC + i * 4, ®); | ||
711 | |||
712 | ret += sprintf(buf, "%sEntry: %d\n", buf, i); | ||
713 | ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n", | ||
714 | buf, | ||
715 | reg & 0x80000000 ? "Disabled" : "Allowed", | ||
716 | reg & 0x40000000 ? "Disabled" : "Allowed"); | ||
717 | ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n", | ||
718 | buf, (reg & 0x30000) >> 16, reg & 0xfff); | ||
719 | } | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static ssize_t | ||
724 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, | ||
725 | size_t count) | ||
726 | { | ||
727 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
728 | struct pci_dev *dev = NULL; | ||
729 | unsigned int ret, index, val; | ||
730 | |||
731 | if (!this_leaf->can_disable) | ||
732 | return 0; | ||
733 | |||
734 | if (strlen(buf) > 15) | ||
735 | return -EINVAL; | ||
736 | |||
737 | ret = sscanf(buf, "%x %x", &index, &val); | ||
738 | if (ret != 2) | ||
739 | return -EINVAL; | ||
740 | if (index > 1) | ||
741 | return -EINVAL; | ||
742 | |||
743 | val |= 0xc0000000; | ||
744 | dev = get_k8_northbridge(node); | ||
745 | if (!dev) { | ||
746 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
747 | return -EINVAL; | ||
748 | } | ||
749 | |||
750 | pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000); | ||
751 | wbinvd(); | ||
752 | pci_write_config_dword(dev, 0x1BC + index * 4, val); | ||
753 | |||
754 | return 1; | ||
755 | } | ||
756 | |||
640 | struct _cache_attr { | 757 | struct _cache_attr { |
641 | struct attribute attr; | 758 | struct attribute attr; |
642 | ssize_t (*show)(struct _cpuid4_info *, char *); | 759 | ssize_t (*show)(struct _cpuid4_info *, char *); |
@@ -657,6 +774,8 @@ define_one_ro(size); | |||
657 | define_one_ro(shared_cpu_map); | 774 | define_one_ro(shared_cpu_map); |
658 | define_one_ro(shared_cpu_list); | 775 | define_one_ro(shared_cpu_list); |
659 | 776 | ||
777 | static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable); | ||
778 | |||
660 | static struct attribute * default_attrs[] = { | 779 | static struct attribute * default_attrs[] = { |
661 | &type.attr, | 780 | &type.attr, |
662 | &level.attr, | 781 | &level.attr, |
@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = { | |||
667 | &size.attr, | 786 | &size.attr, |
668 | &shared_cpu_map.attr, | 787 | &shared_cpu_map.attr, |
669 | &shared_cpu_list.attr, | 788 | &shared_cpu_list.attr, |
789 | &cache_disable.attr, | ||
670 | NULL | 790 | NULL |
671 | }; | 791 | }; |
672 | 792 | ||
673 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
674 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
675 | |||
676 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 793 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) |
677 | { | 794 | { |
678 | struct _cache_attr *fattr = to_attr(attr); | 795 | struct _cache_attr *fattr = to_attr(attr); |
@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
682 | ret = fattr->show ? | 799 | ret = fattr->show ? |
683 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 800 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
684 | buf) : | 801 | buf) : |
685 | 0; | 802 | 0; |
686 | return ret; | 803 | return ret; |
687 | } | 804 | } |
688 | 805 | ||
689 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 806 | static ssize_t store(struct kobject * kobj, struct attribute * attr, |
690 | const char * buf, size_t count) | 807 | const char * buf, size_t count) |
691 | { | 808 | { |
692 | return 0; | 809 | struct _cache_attr *fattr = to_attr(attr); |
810 | struct _index_kobject *this_leaf = to_object(kobj); | ||
811 | ssize_t ret; | ||
812 | |||
813 | ret = fattr->store ? | ||
814 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | ||
815 | buf, count) : | ||
816 | 0; | ||
817 | return ret; | ||
693 | } | 818 | } |
694 | 819 | ||
695 | static struct sysfs_ops sysfs_ops = { | 820 | static struct sysfs_ops sysfs_ops = { |
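
intel_cacheinfo.c gains a cache_disable attribute per cache leaf, wired to the two L3 index-disable words at config offsets 0x1BC/0x1C0 of the Family 10h northbridge, and the sysfs store() path now dispatches to per-attribute store handlers. Driving the attribute from user space follows the "index value" format parsed by store_cache_disable(); the path and values in this sketch are illustrative only and require root on a Family 10h system:

/* Sketch: read the current L3 index-disable state, then disable one index.
 * The sysfs path and the written value are illustrative. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable";
	char line[256];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/* "<index> <subcache/index word>", matching sscanf("%x %x") above. */
	fprintf(f, "%x %x\n", 0, 0x3001);
	fclose(f);
	return 0;
}
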
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl new file mode 100644 index 000000000000..dfea390e1608 --- /dev/null +++ b/arch/x86/kernel/cpu/mkcapflags.pl | |||
@@ -0,0 +1,32 @@ | |||
1 | #!/usr/bin/perl | ||
2 | # | ||
3 | # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h | ||
4 | # | ||
5 | |||
6 | ($in, $out) = @ARGV; | ||
7 | |||
8 | open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n"; | ||
9 | open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n"; | ||
10 | |||
11 | print OUT "#include <asm/cpufeature.h>\n\n"; | ||
12 | print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n"; | ||
13 | |||
14 | while (defined($line = <IN>)) { | ||
15 | if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) { | ||
16 | $macro = $1; | ||
17 | $feature = $2; | ||
18 | $tail = $3; | ||
19 | if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) { | ||
20 | $feature = $1; | ||
21 | } | ||
22 | |||
23 | if ($feature ne '') { | ||
24 | printf OUT "\t%-32s = \"%s\",\n", | ||
25 | "[$macro]", "\L$feature"; | ||
26 | } | ||
27 | } | ||
28 | } | ||
29 | print OUT "};\n"; | ||
30 | |||
31 | close(IN); | ||
32 | close(OUT); | ||
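
mkcapflags.pl replaces the hand-maintained x86_cap_flags[] table deleted from feature_names.c above: the array is now generated at build time from include/asm-x86/cpufeature.h, so a new feature bit only needs to be added in one place. The generated file looks roughly like the following excerpt (an illustrative sketch of the emitted kernel source, not the full table); where a #define carries a quoted name in its comment, that name overrides the lower-cased macro suffix:

/* Illustrative excerpt of the generated capflags.c */
#include <asm/cpufeature.h>

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU]                = "fpu",
	[X86_FEATURE_VME]                = "vme",
	[X86_FEATURE_DE]                 = "de",
	[X86_FEATURE_PSE]                = "pse",
	[X86_FEATURE_TSC]                = "tsc",
	[X86_FEATURE_XMM]                = "sse",
	[X86_FEATURE_XMM2]               = "sse2",
	/* ... one entry per feature #define that names a flag ... */
};
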
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index b117d7f8a564..58ac5d3d4361 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -729,7 +729,7 @@ struct var_mtrr_range_state { | |||
729 | mtrr_type type; | 729 | mtrr_type type; |
730 | }; | 730 | }; |
731 | 731 | ||
732 | struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | 732 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; |
733 | static int __initdata debug_print; | 733 | static int __initdata debug_print; |
734 | 734 | ||
735 | static int __init | 735 | static int __init |
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c new file mode 100644 index 000000000000..5abbea297e0c --- /dev/null +++ b/arch/x86/kernel/cpu/powerflags.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Strings for the various x86 power flags | ||
3 | * | ||
4 | * This file must not contain any executable code. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cpufeature.h> | ||
8 | |||
9 | const char *const x86_power_flags[32] = { | ||
10 | "ts", /* temperature sensor */ | ||
11 | "fid", /* frequency id control */ | ||
12 | "vid", /* voltage id control */ | ||
13 | "ttp", /* thermal trip */ | ||
14 | "tm", | ||
15 | "stc", | ||
16 | "100mhzsteps", | ||
17 | "hwpstate", | ||
18 | "", /* tsc invariant mapped to constant_tsc */ | ||
19 | /* nothing */ | ||
20 | }; | ||
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index b911a2c61b8f..7c46e6ecedca 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -102,6 +102,7 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { | |||
102 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 102 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
103 | .c_init = init_transmeta, | 103 | .c_init = init_transmeta, |
104 | .c_identify = transmeta_identify, | 104 | .c_identify = transmeta_identify, |
105 | .c_x86_vendor = X86_VENDOR_TRANSMETA, | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); | 108 | cpu_dev_register(transmeta_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index b1fc90989d75..e777f79e0960 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = { | |||
19 | } | 19 | } |
20 | }, | 20 | }, |
21 | }, | 21 | }, |
22 | .c_x86_vendor = X86_VENDOR_UMC, | ||
22 | }; | 23 | }; |
23 | 24 | ||
24 | cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); | 25 | cpu_dev_register(umc_cpu_dev); |
25 | 26 | ||
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 9af89078f7bb..e24d1bc47b46 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -148,6 +148,9 @@ void __init e820_print_map(char *who) | |||
148 | case E820_NVS: | 148 | case E820_NVS: |
149 | printk(KERN_CONT "(ACPI NVS)\n"); | 149 | printk(KERN_CONT "(ACPI NVS)\n"); |
150 | break; | 150 | break; |
151 | case E820_UNUSABLE: | ||
152 | printk("(unusable)\n"); | ||
153 | break; | ||
151 | default: | 154 | default: |
152 | printk(KERN_CONT "type %u\n", e820.map[i].type); | 155 | printk(KERN_CONT "type %u\n", e820.map[i].type); |
153 | break; | 156 | break; |
@@ -1260,6 +1263,7 @@ static inline const char *e820_type_to_string(int e820_type) | |||
1260 | case E820_RAM: return "System RAM"; | 1263 | case E820_RAM: return "System RAM"; |
1261 | case E820_ACPI: return "ACPI Tables"; | 1264 | case E820_ACPI: return "ACPI Tables"; |
1262 | case E820_NVS: return "ACPI Non-volatile Storage"; | 1265 | case E820_NVS: return "ACPI Non-volatile Storage"; |
1266 | case E820_UNUSABLE: return "Unusable memory"; | ||
1263 | default: return "reserved"; | 1267 | default: return "reserved"; |
1264 | } | 1268 | } |
1265 | } | 1269 | } |
@@ -1267,6 +1271,7 @@ static inline const char *e820_type_to_string(int e820_type) | |||
1267 | /* | 1271 | /* |
1268 | * Mark e820 reserved areas as busy for the resource manager. | 1272 | * Mark e820 reserved areas as busy for the resource manager. |
1269 | */ | 1273 | */ |
1274 | static struct resource __initdata *e820_res; | ||
1270 | void __init e820_reserve_resources(void) | 1275 | void __init e820_reserve_resources(void) |
1271 | { | 1276 | { |
1272 | int i; | 1277 | int i; |
@@ -1274,6 +1279,7 @@ void __init e820_reserve_resources(void) | |||
1274 | u64 end; | 1279 | u64 end; |
1275 | 1280 | ||
1276 | res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); | 1281 | res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); |
1282 | e820_res = res; | ||
1277 | for (i = 0; i < e820.nr_map; i++) { | 1283 | for (i = 0; i < e820.nr_map; i++) { |
1278 | end = e820.map[i].addr + e820.map[i].size - 1; | 1284 | end = e820.map[i].addr + e820.map[i].size - 1; |
1279 | #ifndef CONFIG_RESOURCES_64BIT | 1285 | #ifndef CONFIG_RESOURCES_64BIT |
@@ -1287,7 +1293,14 @@ void __init e820_reserve_resources(void) | |||
1287 | res->end = end; | 1293 | res->end = end; |
1288 | 1294 | ||
1289 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 1295 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
1290 | insert_resource(&iomem_resource, res); | 1296 | |
1297 | /* | ||
1298 | * don't register the region that could be conflicted with | ||
1299 | * pci device BAR resource and insert them later in | ||
1300 | * pcibios_resource_survey() | ||
1301 | */ | ||
1302 | if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) | ||
1303 | insert_resource(&iomem_resource, res); | ||
1291 | res++; | 1304 | res++; |
1292 | } | 1305 | } |
1293 | 1306 | ||
@@ -1299,6 +1312,19 @@ void __init e820_reserve_resources(void) | |||
1299 | } | 1312 | } |
1300 | } | 1313 | } |
1301 | 1314 | ||
1315 | void __init e820_reserve_resources_late(void) | ||
1316 | { | ||
1317 | int i; | ||
1318 | struct resource *res; | ||
1319 | |||
1320 | res = e820_res; | ||
1321 | for (i = 0; i < e820.nr_map; i++) { | ||
1322 | if (!res->parent && res->end) | ||
1323 | reserve_region_with_split(&iomem_resource, res->start, res->end, res->name); | ||
1324 | res++; | ||
1325 | } | ||
1326 | } | ||
1327 | |||
1302 | char *__init default_machine_specific_memory_setup(void) | 1328 | char *__init default_machine_specific_memory_setup(void) |
1303 | { | 1329 | { |
1304 | char *who = "BIOS-e820"; | 1330 | char *who = "BIOS-e820"; |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 89434d439605..cf3a0b2d0059 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -275,9 +275,9 @@ ENTRY(native_usergs_sysret64) | |||
275 | ENTRY(ret_from_fork) | 275 | ENTRY(ret_from_fork) |
276 | CFI_DEFAULT_STACK | 276 | CFI_DEFAULT_STACK |
277 | push kernel_eflags(%rip) | 277 | push kernel_eflags(%rip) |
278 | CFI_ADJUST_CFA_OFFSET 4 | 278 | CFI_ADJUST_CFA_OFFSET 8 |
279 | popf # reset kernel eflags | 279 | popf # reset kernel eflags |
280 | CFI_ADJUST_CFA_OFFSET -4 | 280 | CFI_ADJUST_CFA_OFFSET -8 |
281 | call schedule_tail | 281 | call schedule_tail |
282 | GET_THREAD_INFO(%rcx) | 282 | GET_THREAD_INFO(%rcx) |
283 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | 283 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
diff --git a/arch/x86/mach-es7000/es7000plat.c b/arch/x86/kernel/es7000_32.c index 50189af14b85..849e5cd485b8 100644 --- a/arch/x86/mach-es7000/es7000plat.c +++ b/arch/x86/kernel/es7000_32.c | |||
@@ -39,10 +39,93 @@ | |||
39 | #include <asm/nmi.h> | 39 | #include <asm/nmi.h> |
40 | #include <asm/smp.h> | 40 | #include <asm/smp.h> |
41 | #include <asm/apicdef.h> | 41 | #include <asm/apicdef.h> |
42 | #include "es7000.h" | ||
43 | #include <mach_mpparse.h> | 42 | #include <mach_mpparse.h> |
44 | 43 | ||
45 | /* | 44 | /* |
45 | * ES7000 chipsets | ||
46 | */ | ||
47 | |||
48 | #define NON_UNISYS 0 | ||
49 | #define ES7000_CLASSIC 1 | ||
50 | #define ES7000_ZORRO 2 | ||
51 | |||
52 | |||
53 | #define MIP_REG 1 | ||
54 | #define MIP_PSAI_REG 4 | ||
55 | |||
56 | #define MIP_BUSY 1 | ||
57 | #define MIP_SPIN 0xf0000 | ||
58 | #define MIP_VALID 0x0100000000000000ULL | ||
59 | #define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff) | ||
60 | |||
61 | #define MIP_RD_LO(VALUE) (VALUE & 0xffffffff) | ||
62 | |||
63 | struct mip_reg_info { | ||
64 | unsigned long long mip_info; | ||
65 | unsigned long long delivery_info; | ||
66 | unsigned long long host_reg; | ||
67 | unsigned long long mip_reg; | ||
68 | }; | ||
69 | |||
70 | struct part_info { | ||
71 | unsigned char type; | ||
72 | unsigned char length; | ||
73 | unsigned char part_id; | ||
74 | unsigned char apic_mode; | ||
75 | unsigned long snum; | ||
76 | char ptype[16]; | ||
77 | char sname[64]; | ||
78 | char pname[64]; | ||
79 | }; | ||
80 | |||
81 | struct psai { | ||
82 | unsigned long long entry_type; | ||
83 | unsigned long long addr; | ||
84 | unsigned long long bep_addr; | ||
85 | }; | ||
86 | |||
87 | struct es7000_mem_info { | ||
88 | unsigned char type; | ||
89 | unsigned char length; | ||
90 | unsigned char resv[6]; | ||
91 | unsigned long long start; | ||
92 | unsigned long long size; | ||
93 | }; | ||
94 | |||
95 | struct es7000_oem_table { | ||
96 | unsigned long long hdr; | ||
97 | struct mip_reg_info mip; | ||
98 | struct part_info pif; | ||
99 | struct es7000_mem_info shm; | ||
100 | struct psai psai; | ||
101 | }; | ||
102 | |||
103 | #ifdef CONFIG_ACPI | ||
104 | |||
105 | struct oem_table { | ||
106 | struct acpi_table_header Header; | ||
107 | u32 OEMTableAddr; | ||
108 | u32 OEMTableSize; | ||
109 | }; | ||
110 | |||
111 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | ||
112 | #endif | ||
113 | |||
114 | struct mip_reg { | ||
115 | unsigned long long off_0; | ||
116 | unsigned long long off_8; | ||
117 | unsigned long long off_10; | ||
118 | unsigned long long off_18; | ||
119 | unsigned long long off_20; | ||
120 | unsigned long long off_28; | ||
121 | unsigned long long off_30; | ||
122 | unsigned long long off_38; | ||
123 | }; | ||
124 | |||
125 | #define MIP_SW_APIC 0x1020b | ||
126 | #define MIP_FUNC(VALUE) (VALUE & 0xff) | ||
127 | |||
128 | /* | ||
46 | * ES7000 Globals | 129 | * ES7000 Globals |
47 | */ | 130 | */ |
48 | 131 | ||
@@ -72,7 +155,7 @@ es7000_rename_gsi(int ioapic, int gsi) | |||
72 | base += nr_ioapic_registers[i]; | 155 | base += nr_ioapic_registers[i]; |
73 | } | 156 | } |
74 | 157 | ||
75 | if (!ioapic && (gsi < 16)) | 158 | if (!ioapic && (gsi < 16)) |
76 | gsi += base; | 159 | gsi += base; |
77 | return gsi; | 160 | return gsi; |
78 | } | 161 | } |
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index eaff0bbb1444..6c9bfc9e1e95 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c | |||
@@ -16,87 +16,63 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/hardirq.h> | 18 | #include <linux/hardirq.h> |
19 | #include <linux/dmar.h> | ||
19 | 20 | ||
20 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
21 | #include <asm/ipi.h> | 22 | #include <asm/ipi.h> |
22 | #include <asm/genapic.h> | 23 | #include <asm/genapic.h> |
23 | 24 | ||
24 | #ifdef CONFIG_ACPI | 25 | extern struct genapic apic_flat; |
25 | #include <acpi/acpi_bus.h> | 26 | extern struct genapic apic_physflat; |
26 | #endif | 27 | extern struct genapic apic_x2apic_uv_x; |
27 | 28 | extern struct genapic apic_x2apic_phys; | |
28 | DEFINE_PER_CPU(int, x2apic_extra_bits); | 29 | extern struct genapic apic_x2apic_cluster; |
29 | 30 | ||
30 | struct genapic __read_mostly *genapic = &apic_flat; | 31 | struct genapic __read_mostly *genapic = &apic_flat; |
31 | 32 | ||
32 | static enum uv_system_type uv_system_type; | 33 | static struct genapic *apic_probe[] __initdata = { |
34 | &apic_x2apic_uv_x, | ||
35 | &apic_x2apic_phys, | ||
36 | &apic_x2apic_cluster, | ||
37 | &apic_physflat, | ||
38 | NULL, | ||
39 | }; | ||
33 | 40 | ||
34 | /* | 41 | /* |
35 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. | 42 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. |
36 | */ | 43 | */ |
37 | void __init setup_apic_routing(void) | 44 | void __init setup_apic_routing(void) |
38 | { | 45 | { |
39 | if (uv_system_type == UV_NON_UNIQUE_APIC) | 46 | if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) { |
40 | genapic = &apic_x2apic_uv_x; | 47 | if (!intr_remapping_enabled) |
41 | else | 48 | genapic = &apic_flat; |
42 | #ifdef CONFIG_ACPI | 49 | } |
43 | /* | ||
44 | * Quirk: some x86_64 machines can only use physical APIC mode | ||
45 | * regardless of how many processors are present (x86_64 ES7000 | ||
46 | * is an example). | ||
47 | */ | ||
48 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && | ||
49 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) | ||
50 | genapic = &apic_physflat; | ||
51 | else | ||
52 | #endif | ||
53 | |||
54 | if (max_physical_apicid < 8) | ||
55 | genapic = &apic_flat; | ||
56 | else | ||
57 | genapic = &apic_physflat; | ||
58 | 50 | ||
59 | printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); | 51 | if (genapic == &apic_flat) { |
52 | if (max_physical_apicid >= 8) | ||
53 | genapic = &apic_physflat; | ||
54 | printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); | ||
55 | } | ||
60 | } | 56 | } |
61 | 57 | ||
62 | /* Same for both flat and physical. */ | 58 | /* Same for both flat and physical. */ |
63 | 59 | ||
64 | void send_IPI_self(int vector) | 60 | void apic_send_IPI_self(int vector) |
65 | { | 61 | { |
66 | __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); | 62 | __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); |
67 | } | 63 | } |
68 | 64 | ||
69 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 65 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
70 | { | 66 | { |
71 | if (!strcmp(oem_id, "SGI")) { | 67 | int i; |
72 | if (!strcmp(oem_table_id, "UVL")) | 68 | |
73 | uv_system_type = UV_LEGACY_APIC; | 69 | for (i = 0; apic_probe[i]; ++i) { |
74 | else if (!strcmp(oem_table_id, "UVX")) | 70 | if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { |
75 | uv_system_type = UV_X2APIC; | 71 | genapic = apic_probe[i]; |
76 | else if (!strcmp(oem_table_id, "UVH")) | 72 | printk(KERN_INFO "Setting APIC routing to %s.\n", |
77 | uv_system_type = UV_NON_UNIQUE_APIC; | 73 | genapic->name); |
74 | return 1; | ||
75 | } | ||
78 | } | 76 | } |
79 | return 0; | 77 | return 0; |
80 | } | 78 | } |
81 | |||
82 | unsigned int read_apic_id(void) | ||
83 | { | ||
84 | unsigned int id; | ||
85 | |||
86 | WARN_ON(preemptible() && num_online_cpus() > 1); | ||
87 | id = apic_read(APIC_ID); | ||
88 | if (uv_system_type >= UV_X2APIC) | ||
89 | id |= __get_cpu_var(x2apic_extra_bits); | ||
90 | return id; | ||
91 | } | ||
92 | |||
93 | enum uv_system_type get_uv_system_type(void) | ||
94 | { | ||
95 | return uv_system_type; | ||
96 | } | ||
97 | |||
98 | int is_uv_system(void) | ||
99 | { | ||
100 | return uv_system_type != UV_NONE; | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(is_uv_system); | ||
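
genapic_64.c now selects the APIC driver by walking a NULL-terminated probe array and latching the first genapic whose acpi_madt_oem_check() accepts the platform, with the UV and x2APIC specifics moved into their own drivers. A simplified, self-contained sketch of that probe-array dispatch, with the types and names reduced to the essentials, is:

/* Sketch of the probe-array dispatch: try each candidate in order and keep
 * the first one whose check routine claims the platform. */
#include <stdio.h>
#include <string.h>

struct genapic {
	const char *name;
	int (*acpi_madt_oem_check)(const char *oem_id, const char *oem_table_id);
};

static int uv_check(const char *oem_id, const char *oem_table_id)
{
	return !strcmp(oem_id, "SGI") && !strcmp(oem_table_id, "UVH");
}

static int always(const char *oem_id, const char *oem_table_id)
{
	return 1;
}

static struct genapic apic_uv       = { "UV",            uv_check };
static struct genapic apic_physflat = { "physical flat", always   };

static struct genapic *apic_probe[] = { &apic_uv, &apic_physflat, NULL };
static struct genapic *genapic      = &apic_physflat;

static int madt_oem_check(const char *oem_id, const char *oem_table_id)
{
	for (int i = 0; apic_probe[i]; i++) {
		if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
			genapic = apic_probe[i];
			printf("Setting APIC routing to %s.\n", genapic->name);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	madt_oem_check("SGI", "UVH");	/* picks the UV driver in this demo */
	return 0;
}
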
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 786548a62d38..9eca5ba7a6b1 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -15,9 +15,20 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/hardirq.h> | ||
18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
19 | #include <asm/ipi.h> | 20 | #include <asm/ipi.h> |
20 | #include <asm/genapic.h> | 21 | #include <asm/genapic.h> |
22 | #include <mach_apicdef.h> | ||
23 | |||
24 | #ifdef CONFIG_ACPI | ||
25 | #include <acpi/acpi_bus.h> | ||
26 | #endif | ||
27 | |||
28 | static int __init flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
29 | { | ||
30 | return 1; | ||
31 | } | ||
21 | 32 | ||
22 | static cpumask_t flat_target_cpus(void) | 33 | static cpumask_t flat_target_cpus(void) |
23 | { | 34 | { |
@@ -95,9 +106,33 @@ static void flat_send_IPI_all(int vector) | |||
95 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); | 106 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); |
96 | } | 107 | } |
97 | 108 | ||
109 | static unsigned int get_apic_id(unsigned long x) | ||
110 | { | ||
111 | unsigned int id; | ||
112 | |||
113 | id = (((x)>>24) & 0xFFu); | ||
114 | return id; | ||
115 | } | ||
116 | |||
117 | static unsigned long set_apic_id(unsigned int id) | ||
118 | { | ||
119 | unsigned long x; | ||
120 | |||
121 | x = ((id & 0xFFu)<<24); | ||
122 | return x; | ||
123 | } | ||
124 | |||
125 | static unsigned int read_xapic_id(void) | ||
126 | { | ||
127 | unsigned int id; | ||
128 | |||
129 | id = get_apic_id(apic_read(APIC_ID)); | ||
130 | return id; | ||
131 | } | ||
132 | |||
98 | static int flat_apic_id_registered(void) | 133 | static int flat_apic_id_registered(void) |
99 | { | 134 | { |
100 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); | 135 | return physid_isset(read_xapic_id(), phys_cpu_present_map); |
101 | } | 136 | } |
102 | 137 | ||
103 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) | 138 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) |
@@ -112,6 +147,7 @@ static unsigned int phys_pkg_id(int index_msb) | |||
112 | 147 | ||
113 | struct genapic apic_flat = { | 148 | struct genapic apic_flat = { |
114 | .name = "flat", | 149 | .name = "flat", |
150 | .acpi_madt_oem_check = flat_acpi_madt_oem_check, | ||
115 | .int_delivery_mode = dest_LowestPrio, | 151 | .int_delivery_mode = dest_LowestPrio, |
116 | .int_dest_mode = (APIC_DEST_LOGICAL != 0), | 152 | .int_dest_mode = (APIC_DEST_LOGICAL != 0), |
117 | .target_cpus = flat_target_cpus, | 153 | .target_cpus = flat_target_cpus, |
@@ -121,8 +157,12 @@ struct genapic apic_flat = { | |||
121 | .send_IPI_all = flat_send_IPI_all, | 157 | .send_IPI_all = flat_send_IPI_all, |
122 | .send_IPI_allbutself = flat_send_IPI_allbutself, | 158 | .send_IPI_allbutself = flat_send_IPI_allbutself, |
123 | .send_IPI_mask = flat_send_IPI_mask, | 159 | .send_IPI_mask = flat_send_IPI_mask, |
160 | .send_IPI_self = apic_send_IPI_self, | ||
124 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, | 161 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, |
125 | .phys_pkg_id = phys_pkg_id, | 162 | .phys_pkg_id = phys_pkg_id, |
163 | .get_apic_id = get_apic_id, | ||
164 | .set_apic_id = set_apic_id, | ||
165 | .apic_id_mask = (0xFFu<<24), | ||
126 | }; | 166 | }; |
127 | 167 | ||
128 | /* | 168 | /* |
@@ -130,6 +170,21 @@ struct genapic apic_flat = { | |||
130 | * We cannot use logical delivery in this case because the mask | 170 | * We cannot use logical delivery in this case because the mask |
131 | * overflows, so use physical mode. | 171 | * overflows, so use physical mode. |
132 | */ | 172 | */ |
173 | static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
174 | { | ||
175 | #ifdef CONFIG_ACPI | ||
176 | /* | ||
177 | * Quirk: some x86_64 machines can only use physical APIC mode | ||
178 | * regardless of how many processors are present (x86_64 ES7000 | ||
179 | * is an example). | ||
180 | */ | ||
181 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && | ||
182 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) | ||
183 | return 1; | ||
184 | #endif | ||
185 | |||
186 | return 0; | ||
187 | } | ||
133 | 188 | ||
134 | static cpumask_t physflat_target_cpus(void) | 189 | static cpumask_t physflat_target_cpus(void) |
135 | { | 190 | { |
@@ -176,6 +231,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | |||
176 | 231 | ||
177 | struct genapic apic_physflat = { | 232 | struct genapic apic_physflat = { |
178 | .name = "physical flat", | 233 | .name = "physical flat", |
234 | .acpi_madt_oem_check = physflat_acpi_madt_oem_check, | ||
179 | .int_delivery_mode = dest_Fixed, | 235 | .int_delivery_mode = dest_Fixed, |
180 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | 236 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), |
181 | .target_cpus = physflat_target_cpus, | 237 | .target_cpus = physflat_target_cpus, |
@@ -185,6 +241,10 @@ struct genapic apic_physflat = { | |||
185 | .send_IPI_all = physflat_send_IPI_all, | 241 | .send_IPI_all = physflat_send_IPI_all, |
186 | .send_IPI_allbutself = physflat_send_IPI_allbutself, | 242 | .send_IPI_allbutself = physflat_send_IPI_allbutself, |
187 | .send_IPI_mask = physflat_send_IPI_mask, | 243 | .send_IPI_mask = physflat_send_IPI_mask, |
244 | .send_IPI_self = apic_send_IPI_self, | ||
188 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, | 245 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, |
189 | .phys_pkg_id = phys_pkg_id, | 246 | .phys_pkg_id = phys_pkg_id, |
247 | .get_apic_id = get_apic_id, | ||
248 | .set_apic_id = set_apic_id, | ||
249 | .apic_id_mask = (0xFFu<<24), | ||
190 | }; | 250 | }; |
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c new file mode 100644 index 000000000000..e4bf2cc0d743 --- /dev/null +++ b/arch/x86/kernel/genx2apic_cluster.c | |||
@@ -0,0 +1,159 @@ | |||
1 | #include <linux/threads.h> | ||
2 | #include <linux/cpumask.h> | ||
3 | #include <linux/string.h> | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/ctype.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | ||
8 | |||
9 | #include <asm/smp.h> | ||
10 | #include <asm/ipi.h> | ||
11 | #include <asm/genapic.h> | ||
12 | |||
13 | DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); | ||
14 | |||
15 | static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
16 | { | ||
17 | if (cpu_has_x2apic) | ||
18 | return 1; | ||
19 | |||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | ||
24 | |||
25 | static cpumask_t x2apic_target_cpus(void) | ||
26 | { | ||
27 | return cpumask_of_cpu(0); | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * for now each logical cpu is in its own vector allocation domain. | ||
32 | */ | ||
33 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | ||
34 | { | ||
35 | cpumask_t domain = CPU_MASK_NONE; | ||
36 | cpu_set(cpu, domain); | ||
37 | return domain; | ||
38 | } | ||
39 | |||
40 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | ||
41 | unsigned int dest) | ||
42 | { | ||
43 | unsigned long cfg; | ||
44 | |||
45 | cfg = __prepare_ICR(0, vector, dest); | ||
46 | |||
47 | /* | ||
48 | * send the IPI. | ||
49 | */ | ||
50 | x2apic_icr_write(cfg, apicid); | ||
51 | } | ||
52 | |||
53 | /* | ||
54 |  * for now, we send the IPIs one by one in the cpumask. ||
55 |  * TBD: Based on the cpu mask, we can send the IPIs to the cluster group ||
56 |  * at once. We have 16 CPUs in a cluster. This will minimize IPI register ||
57 | * writes. | ||
58 | */ | ||
59 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | ||
60 | { | ||
61 | unsigned long flags; | ||
62 | unsigned long query_cpu; | ||
63 | |||
64 | local_irq_save(flags); | ||
65 | for_each_cpu_mask(query_cpu, mask) { | ||
66 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), | ||
67 | vector, APIC_DEST_LOGICAL); | ||
68 | } | ||
69 | local_irq_restore(flags); | ||
70 | } | ||
71 | |||
72 | static void x2apic_send_IPI_allbutself(int vector) | ||
73 | { | ||
74 | cpumask_t mask = cpu_online_map; | ||
75 | |||
76 | cpu_clear(smp_processor_id(), mask); | ||
77 | |||
78 | if (!cpus_empty(mask)) | ||
79 | x2apic_send_IPI_mask(mask, vector); | ||
80 | } | ||
81 | |||
82 | static void x2apic_send_IPI_all(int vector) | ||
83 | { | ||
84 | x2apic_send_IPI_mask(cpu_online_map, vector); | ||
85 | } | ||
86 | |||
87 | static int x2apic_apic_id_registered(void) | ||
88 | { | ||
89 | return 1; | ||
90 | } | ||
91 | |||
92 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | ||
93 | { | ||
94 | int cpu; | ||
95 | |||
96 | /* | ||
97 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
98 | * May as well be the first. | ||
99 | */ | ||
100 | cpu = first_cpu(cpumask); | ||
101 | if ((unsigned)cpu < NR_CPUS) | ||
102 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
103 | else | ||
104 | return BAD_APICID; | ||
105 | } | ||
106 | |||
107 | static unsigned int get_apic_id(unsigned long x) | ||
108 | { | ||
109 | unsigned int id; | ||
110 | |||
111 | id = x; | ||
112 | return id; | ||
113 | } | ||
114 | |||
115 | static unsigned long set_apic_id(unsigned int id) | ||
116 | { | ||
117 | unsigned long x; | ||
118 | |||
119 | x = id; | ||
120 | return x; | ||
121 | } | ||
122 | |||
123 | static unsigned int phys_pkg_id(int index_msb) | ||
124 | { | ||
125 | return current_cpu_data.initial_apicid >> index_msb; | ||
126 | } | ||
127 | |||
128 | static void x2apic_send_IPI_self(int vector) | ||
129 | { | ||
130 | apic_write(APIC_SELF_IPI, vector); | ||
131 | } | ||
132 | |||
133 | static void init_x2apic_ldr(void) | ||
134 | { | ||
135 | int cpu = smp_processor_id(); | ||
136 | |||
137 | per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | struct genapic apic_x2apic_cluster = { | ||
142 | .name = "cluster x2apic", | ||
143 | .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, | ||
144 | .int_delivery_mode = dest_LowestPrio, | ||
145 | .int_dest_mode = (APIC_DEST_LOGICAL != 0), | ||
146 | .target_cpus = x2apic_target_cpus, | ||
147 | .vector_allocation_domain = x2apic_vector_allocation_domain, | ||
148 | .apic_id_registered = x2apic_apic_id_registered, | ||
149 | .init_apic_ldr = init_x2apic_ldr, | ||
150 | .send_IPI_all = x2apic_send_IPI_all, | ||
151 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | ||
152 | .send_IPI_mask = x2apic_send_IPI_mask, | ||
153 | .send_IPI_self = x2apic_send_IPI_self, | ||
154 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | ||
155 | .phys_pkg_id = phys_pkg_id, | ||
156 | .get_apic_id = get_apic_id, | ||
157 | .set_apic_id = set_apic_id, | ||
158 | .apic_id_mask = (0xFFFFFFFFu), | ||
159 | }; | ||
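The TBD comment in x2apic_send_IPI_mask() above notes that IPIs could be sent per 16-CPU cluster instead of one destination at a time. What follows is only a rough user-space sketch of that batching idea, not code from this patch: the logical-ID layout (cluster number in the upper bits, a one-hot member bit in the lower 16) and every helper name here are assumptions made for illustration.

#include <stdio.h>
#include <stdint.h>

#define MAX_CPUS 64

/* Hypothetical logical IDs: cluster in bits 31..16, one-hot member bit below. */
static uint32_t logical_apicid[MAX_CPUS];
static int icr_writes;

/* Stand-in for one ICR write to a logical destination (illustration only). */
static void send_ipi_dest(uint32_t dest, int vector)
{
	icr_writes++;
	printf("IPI vector %d -> logical dest 0x%08x\n", vector, (unsigned)dest);
}

/* One write per cluster: OR together the member bits of all targets in it. */
static void send_ipi_mask_batched(uint64_t cpumask, int vector)
{
	int cpu, other;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		uint32_t dest;

		if (!(cpumask & (1ULL << cpu)))
			continue;

		dest = logical_apicid[cpu];
		for (other = cpu + 1; other < MAX_CPUS; other++) {
			if ((cpumask & (1ULL << other)) &&
			    (logical_apicid[other] >> 16) == (dest >> 16)) {
				dest |= logical_apicid[other] & 0xffff;
				cpumask &= ~(1ULL << other);
			}
		}
		send_ipi_dest(dest, vector);
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 32; cpu++)
		logical_apicid[cpu] = (uint32_t)(cpu / 16) << 16 | 1u << (cpu % 16);

	send_ipi_mask_batched(0xffffffffULL, 0xfd);
	printf("%d ICR writes for 32 CPUs\n", icr_writes);
	return 0;
}

With this grouping, 32 targets in two clusters take two ICR writes instead of 32, which is the saving the TBD comment is after.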
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c new file mode 100644 index 000000000000..8f1343df2627 --- /dev/null +++ b/arch/x86/kernel/genx2apic_phys.c | |||
@@ -0,0 +1,154 @@ | |||
1 | #include <linux/threads.h> | ||
2 | #include <linux/cpumask.h> | ||
3 | #include <linux/string.h> | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/ctype.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | ||
8 | |||
9 | #include <asm/smp.h> | ||
10 | #include <asm/ipi.h> | ||
11 | #include <asm/genapic.h> | ||
12 | |||
13 | static int x2apic_phys; | ||
14 | |||
15 | static int set_x2apic_phys_mode(char *arg) | ||
16 | { | ||
17 | x2apic_phys = 1; | ||
18 | return 0; | ||
19 | } | ||
20 | early_param("x2apic_phys", set_x2apic_phys_mode); | ||
21 | |||
22 | static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
23 | { | ||
24 | if (cpu_has_x2apic && x2apic_phys) | ||
25 | return 1; | ||
26 | |||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | ||
31 | |||
32 | static cpumask_t x2apic_target_cpus(void) | ||
33 | { | ||
34 | return cpumask_of_cpu(0); | ||
35 | } | ||
36 | |||
37 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | ||
38 | { | ||
39 | cpumask_t domain = CPU_MASK_NONE; | ||
40 | cpu_set(cpu, domain); | ||
41 | return domain; | ||
42 | } | ||
43 | |||
44 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | ||
45 | unsigned int dest) | ||
46 | { | ||
47 | unsigned long cfg; | ||
48 | |||
49 | cfg = __prepare_ICR(0, vector, dest); | ||
50 | |||
51 | /* | ||
52 | * send the IPI. | ||
53 | */ | ||
54 | x2apic_icr_write(cfg, apicid); | ||
55 | } | ||
56 | |||
57 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | ||
58 | { | ||
59 | unsigned long flags; | ||
60 | unsigned long query_cpu; | ||
61 | |||
62 | local_irq_save(flags); | ||
63 | for_each_cpu_mask(query_cpu, mask) { | ||
64 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), | ||
65 | vector, APIC_DEST_PHYSICAL); | ||
66 | } | ||
67 | local_irq_restore(flags); | ||
68 | } | ||
69 | |||
70 | static void x2apic_send_IPI_allbutself(int vector) | ||
71 | { | ||
72 | cpumask_t mask = cpu_online_map; | ||
73 | |||
74 | cpu_clear(smp_processor_id(), mask); | ||
75 | |||
76 | if (!cpus_empty(mask)) | ||
77 | x2apic_send_IPI_mask(mask, vector); | ||
78 | } | ||
79 | |||
80 | static void x2apic_send_IPI_all(int vector) | ||
81 | { | ||
82 | x2apic_send_IPI_mask(cpu_online_map, vector); | ||
83 | } | ||
84 | |||
85 | static int x2apic_apic_id_registered(void) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | ||
91 | { | ||
92 | int cpu; | ||
93 | |||
94 | /* | ||
95 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
96 | * May as well be the first. | ||
97 | */ | ||
98 | cpu = first_cpu(cpumask); | ||
99 | if ((unsigned)cpu < NR_CPUS) | ||
100 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
101 | else | ||
102 | return BAD_APICID; | ||
103 | } | ||
104 | |||
105 | static unsigned int get_apic_id(unsigned long x) | ||
106 | { | ||
107 | unsigned int id; | ||
108 | |||
109 | id = x; | ||
110 | return id; | ||
111 | } | ||
112 | |||
113 | static unsigned long set_apic_id(unsigned int id) | ||
114 | { | ||
115 | unsigned long x; | ||
116 | |||
117 | x = id; | ||
118 | return x; | ||
119 | } | ||
120 | |||
121 | static unsigned int phys_pkg_id(int index_msb) | ||
122 | { | ||
123 | return current_cpu_data.initial_apicid >> index_msb; | ||
124 | } | ||
125 | |||
126 | void x2apic_send_IPI_self(int vector) | ||
127 | { | ||
128 | apic_write(APIC_SELF_IPI, vector); | ||
129 | } | ||
130 | |||
131 | void init_x2apic_ldr(void) | ||
132 | { | ||
133 | return; | ||
134 | } | ||
135 | |||
136 | struct genapic apic_x2apic_phys = { | ||
137 | .name = "physical x2apic", | ||
138 | .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, | ||
139 | .int_delivery_mode = dest_Fixed, | ||
140 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | ||
141 | .target_cpus = x2apic_target_cpus, | ||
142 | .vector_allocation_domain = x2apic_vector_allocation_domain, | ||
143 | .apic_id_registered = x2apic_apic_id_registered, | ||
144 | .init_apic_ldr = init_x2apic_ldr, | ||
145 | .send_IPI_all = x2apic_send_IPI_all, | ||
146 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | ||
147 | .send_IPI_mask = x2apic_send_IPI_mask, | ||
148 | .send_IPI_self = x2apic_send_IPI_self, | ||
149 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | ||
150 | .phys_pkg_id = phys_pkg_id, | ||
151 | .get_apic_id = get_apic_id, | ||
152 | .set_apic_id = set_apic_id, | ||
153 | .apic_id_mask = (0xFFFFFFFFu), | ||
154 | }; | ||
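Each of the new drivers above exports an acpi_madt_oem_check() hook so the generic APIC setup can pick a driver at boot: UV systems match on the OEM table, physical x2apic is chosen when the CPU has x2APIC and "x2apic_phys" was passed, and cluster x2apic is the x2APIC default. The probe code itself is not part of this hunk; the sketch below only illustrates a first-match-wins selection loop, and all names in it are invented for the example.

#include <stdio.h>
#include <string.h>

/* Illustrative driver descriptor; only what this sketch needs. */
struct fake_genapic {
	const char *name;
	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
};

static int has_x2apic = 1;   /* pretend CPUID reported x2APIC support */
static int x2apic_phys = 1;  /* pretend "x2apic_phys" was on the command line */

static int uv_check(char *oem_id, char *oem_table_id)
{
	return !strcmp(oem_id, "SGI") && !strcmp(oem_table_id, "UVH");
}

static int x2apic_phys_check(char *oem_id, char *oem_table_id)
{
	(void)oem_id; (void)oem_table_id;
	return has_x2apic && x2apic_phys;
}

static int x2apic_cluster_check(char *oem_id, char *oem_table_id)
{
	(void)oem_id; (void)oem_table_id;
	return has_x2apic;
}

static struct fake_genapic apic_uv       = { "UV large system", uv_check };
static struct fake_genapic apic_x2_phys  = { "physical x2apic", x2apic_phys_check };
static struct fake_genapic apic_x2_clstr = { "cluster x2apic", x2apic_cluster_check };

/* Probe order matters: the more specific drivers are consulted first. */
static struct fake_genapic *probe_order[] = { &apic_uv, &apic_x2_phys, &apic_x2_clstr };

int main(void)
{
	char oem_id[] = "INTEL", oem_table_id[] = "GENERIC";
	struct fake_genapic *chosen = &apic_x2_clstr;   /* arbitrary fallback */
	unsigned int i;

	for (i = 0; i < sizeof(probe_order) / sizeof(probe_order[0]); i++) {
		if (probe_order[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
			chosen = probe_order[i];
			break;
		}
	}
	printf("selected genapic: %s\n", chosen->name);
	return 0;
}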
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index bfa837cb16be..ae2ffc8a400c 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -12,12 +12,12 @@ | |||
12 | #include <linux/threads.h> | 12 | #include <linux/threads.h> |
13 | #include <linux/cpumask.h> | 13 | #include <linux/cpumask.h> |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/kernel.h> | ||
16 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
19 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
20 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/hardirq.h> | ||
21 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
22 | #include <asm/ipi.h> | 22 | #include <asm/ipi.h> |
23 | #include <asm/genapic.h> | 23 | #include <asm/genapic.h> |
@@ -26,6 +26,36 @@ | |||
26 | #include <asm/uv/uv_hub.h> | 26 | #include <asm/uv/uv_hub.h> |
27 | #include <asm/uv/bios.h> | 27 | #include <asm/uv/bios.h> |
28 | 28 | ||
29 | DEFINE_PER_CPU(int, x2apic_extra_bits); | ||
30 | |||
31 | static enum uv_system_type uv_system_type; | ||
32 | |||
33 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
34 | { | ||
35 | if (!strcmp(oem_id, "SGI")) { | ||
36 | if (!strcmp(oem_table_id, "UVL")) | ||
37 | uv_system_type = UV_LEGACY_APIC; | ||
38 | else if (!strcmp(oem_table_id, "UVX")) | ||
39 | uv_system_type = UV_X2APIC; | ||
40 | else if (!strcmp(oem_table_id, "UVH")) { | ||
41 | uv_system_type = UV_NON_UNIQUE_APIC; | ||
42 | return 1; | ||
43 | } | ||
44 | } | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | enum uv_system_type get_uv_system_type(void) | ||
49 | { | ||
50 | return uv_system_type; | ||
51 | } | ||
52 | |||
53 | int is_uv_system(void) | ||
54 | { | ||
55 | return uv_system_type != UV_NONE; | ||
56 | } | ||
57 | EXPORT_SYMBOL_GPL(is_uv_system); | ||
58 | |||
29 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 59 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
30 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); | 60 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); |
31 | 61 | ||
@@ -123,6 +153,10 @@ static int uv_apic_id_registered(void) | |||
123 | return 1; | 153 | return 1; |
124 | } | 154 | } |
125 | 155 | ||
156 | static void uv_init_apic_ldr(void) | ||
157 | { | ||
158 | } | ||
159 | |||
126 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | 160 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) |
127 | { | 161 | { |
128 | int cpu; | 162 | int cpu; |
@@ -138,9 +172,34 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | |||
138 | return BAD_APICID; | 172 | return BAD_APICID; |
139 | } | 173 | } |
140 | 174 | ||
175 | static unsigned int get_apic_id(unsigned long x) | ||
176 | { | ||
177 | unsigned int id; | ||
178 | |||
179 | WARN_ON(preemptible() && num_online_cpus() > 1); | ||
180 | id = x | __get_cpu_var(x2apic_extra_bits); | ||
181 | |||
182 | return id; | ||
183 | } | ||
184 | |||
185 | static unsigned long set_apic_id(unsigned int id) | ||
186 | { | ||
187 | unsigned long x; | ||
188 | |||
189 | /* mask out x2apic_extra_bits? */ ||
190 | x = id; | ||
191 | return x; | ||
192 | } | ||
193 | |||
194 | static unsigned int uv_read_apic_id(void) | ||
195 | { | ||
196 | |||
197 | return get_apic_id(apic_read(APIC_ID)); | ||
198 | } | ||
199 | |||
141 | static unsigned int phys_pkg_id(int index_msb) | 200 | static unsigned int phys_pkg_id(int index_msb) |
142 | { | 201 | { |
143 | return GET_APIC_ID(read_apic_id()) >> index_msb; | 202 | return uv_read_apic_id() >> index_msb; |
144 | } | 203 | } |
145 | 204 | ||
146 | #ifdef ZZZ /* Needs x2apic patch */ | 205 | #ifdef ZZZ /* Needs x2apic patch */ |
@@ -152,17 +211,22 @@ static void uv_send_IPI_self(int vector) | |||
152 | 211 | ||
153 | struct genapic apic_x2apic_uv_x = { | 212 | struct genapic apic_x2apic_uv_x = { |
154 | .name = "UV large system", | 213 | .name = "UV large system", |
214 | .acpi_madt_oem_check = uv_acpi_madt_oem_check, | ||
155 | .int_delivery_mode = dest_Fixed, | 215 | .int_delivery_mode = dest_Fixed, |
156 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | 216 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), |
157 | .target_cpus = uv_target_cpus, | 217 | .target_cpus = uv_target_cpus, |
158 | .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ | 218 | .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ |
159 | .apic_id_registered = uv_apic_id_registered, | 219 | .apic_id_registered = uv_apic_id_registered, |
220 | .init_apic_ldr = uv_init_apic_ldr, | ||
160 | .send_IPI_all = uv_send_IPI_all, | 221 | .send_IPI_all = uv_send_IPI_all, |
161 | .send_IPI_allbutself = uv_send_IPI_allbutself, | 222 | .send_IPI_allbutself = uv_send_IPI_allbutself, |
162 | .send_IPI_mask = uv_send_IPI_mask, | 223 | .send_IPI_mask = uv_send_IPI_mask, |
163 | /* ZZZ.send_IPI_self = uv_send_IPI_self, */ | 224 | /* ZZZ.send_IPI_self = uv_send_IPI_self, */ |
164 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | 225 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, |
165 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ | 226 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ |
227 | .get_apic_id = get_apic_id, | ||
228 | .set_apic_id = set_apic_id, | ||
229 | .apic_id_mask = (0xFFFFFFFFu), | ||
166 | }; | 230 | }; |
167 | 231 | ||
168 | static __cpuinit void set_x2apic_extra_bits(int pnode) | 232 | static __cpuinit void set_x2apic_extra_bits(int pnode) |
@@ -401,3 +465,5 @@ void __cpuinit uv_cpu_init(void) | |||
401 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) | 465 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) |
402 | set_x2apic_extra_bits(uv_hub_info->pnode); | 466 | set_x2apic_extra_bits(uv_hub_info->pnode); |
403 | } | 467 | } |
468 | |||
469 | |||
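On UV, get_apic_id() ORs the per-cpu x2apic_extra_bits (set from the hub's pnode in set_x2apic_extra_bits()) into the raw APIC_ID value, so the effective ID combines the hardware-assigned bits with node bits. A toy sketch of that composition follows; the shift width is invented for the example, and the real encoding comes from the UV hub definitions, which are not in this hunk.

#include <stdio.h>

/* Invented width of the per-node APIC id field; illustration only. */
#define EXAMPLE_PNODE_SHIFT 6

static unsigned int x2apic_extra_bits;  /* per-cpu in the real code */

static void set_x2apic_extra_bits(int pnode)
{
	x2apic_extra_bits = (unsigned int)pnode << EXAMPLE_PNODE_SHIFT;
}

/* Mirrors the shape of the UV get_apic_id(): hardware id ORed with node bits. */
static unsigned int get_apic_id(unsigned long x)
{
	return (unsigned int)x | x2apic_extra_bits;
}

int main(void)
{
	unsigned long hw_id = 0x2a;   /* value as read from APIC_ID */

	set_x2apic_extra_bits(3);     /* pretend this cpu sits on pnode 3 */
	printf("effective apic id: 0x%x\n", get_apic_id(hw_id));
	return 0;
}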
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index eb9ddd8efb82..45723f1fe198 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -21,9 +21,12 @@ | |||
21 | # include <asm/sigcontext32.h> | 21 | # include <asm/sigcontext32.h> |
22 | # include <asm/user32.h> | 22 | # include <asm/user32.h> |
23 | #else | 23 | #else |
24 | # define save_i387_ia32 save_i387 | 24 | # define save_i387_xstate_ia32 save_i387_xstate |
25 | # define restore_i387_ia32 restore_i387 | 25 | # define restore_i387_xstate_ia32 restore_i387_xstate |
26 | # define _fpstate_ia32 _fpstate | 26 | # define _fpstate_ia32 _fpstate |
27 | # define _xstate_ia32 _xstate | ||
28 | # define sig_xstate_ia32_size sig_xstate_size | ||
29 | # define fx_sw_reserved_ia32 fx_sw_reserved | ||
27 | # define user_i387_ia32_struct user_i387_struct | 30 | # define user_i387_ia32_struct user_i387_struct |
28 | # define user32_fxsr_struct user_fxsr_struct | 31 | # define user32_fxsr_struct user_fxsr_struct |
29 | #endif | 32 | #endif |
@@ -36,6 +39,7 @@ | |||
36 | 39 | ||
37 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 40 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
38 | unsigned int xstate_size; | 41 | unsigned int xstate_size; |
42 | unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32); | ||
39 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; | 43 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; |
40 | 44 | ||
41 | void __cpuinit mxcsr_feature_mask_init(void) | 45 | void __cpuinit mxcsr_feature_mask_init(void) |
@@ -61,6 +65,11 @@ void __init init_thread_xstate(void) | |||
61 | return; | 65 | return; |
62 | } | 66 | } |
63 | 67 | ||
68 | if (cpu_has_xsave) { | ||
69 | xsave_cntxt_init(); | ||
70 | return; | ||
71 | } | ||
72 | |||
64 | if (cpu_has_fxsr) | 73 | if (cpu_has_fxsr) |
65 | xstate_size = sizeof(struct i387_fxsave_struct); | 74 | xstate_size = sizeof(struct i387_fxsave_struct); |
66 | #ifdef CONFIG_X86_32 | 75 | #ifdef CONFIG_X86_32 |
@@ -83,9 +92,19 @@ void __cpuinit fpu_init(void) | |||
83 | 92 | ||
84 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ | 93 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ |
85 | 94 | ||
95 | /* | ||
96 | * Boot processor to setup the FP and extended state context info. | ||
97 | */ | ||
98 | if (!smp_processor_id()) | ||
99 | init_thread_xstate(); | ||
100 | xsave_init(); | ||
101 | |||
86 | mxcsr_feature_mask_init(); | 102 | mxcsr_feature_mask_init(); |
87 | /* clean state in init */ | 103 | /* clean state in init */ |
88 | current_thread_info()->status = 0; | 104 | if (cpu_has_xsave) |
105 | current_thread_info()->status = TS_XSAVE; | ||
106 | else | ||
107 | current_thread_info()->status = 0; | ||
89 | clear_used_math(); | 108 | clear_used_math(); |
90 | } | 109 | } |
91 | #endif /* CONFIG_X86_64 */ | 110 | #endif /* CONFIG_X86_64 */ |
@@ -195,6 +214,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
195 | */ | 214 | */ |
196 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 215 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; |
197 | 216 | ||
217 | /* | ||
218 | * update the header bits in the xsave header, indicating the | ||
219 | * presence of FP and SSE state. | ||
220 | */ | ||
221 | if (cpu_has_xsave) | ||
222 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; | ||
223 | |||
198 | return ret; | 224 | return ret; |
199 | } | 225 | } |
200 | 226 | ||
@@ -395,6 +421,12 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
395 | if (!ret) | 421 | if (!ret) |
396 | convert_to_fxsr(target, &env); | 422 | convert_to_fxsr(target, &env); |
397 | 423 | ||
424 | /* | ||
425 | * update the header bit in the xsave header, indicating the | ||
426 | * presence of FP. | ||
427 | */ | ||
428 | if (cpu_has_xsave) | ||
429 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; | ||
398 | return ret; | 430 | return ret; |
399 | } | 431 | } |
400 | 432 | ||
@@ -407,7 +439,6 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
407 | struct task_struct *tsk = current; | 439 | struct task_struct *tsk = current; |
408 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; | 440 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; |
409 | 441 | ||
410 | unlazy_fpu(tsk); | ||
411 | fp->status = fp->swd; | 442 | fp->status = fp->swd; |
412 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) | 443 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) |
413 | return -1; | 444 | return -1; |
@@ -421,8 +452,6 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
421 | struct user_i387_ia32_struct env; | 452 | struct user_i387_ia32_struct env; |
422 | int err = 0; | 453 | int err = 0; |
423 | 454 | ||
424 | unlazy_fpu(tsk); | ||
425 | |||
426 | convert_from_fxsr(&env, tsk); | 455 | convert_from_fxsr(&env, tsk); |
427 | if (__copy_to_user(buf, &env, sizeof(env))) | 456 | if (__copy_to_user(buf, &env, sizeof(env))) |
428 | return -1; | 457 | return -1; |
@@ -432,16 +461,40 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
432 | if (err) | 461 | if (err) |
433 | return -1; | 462 | return -1; |
434 | 463 | ||
435 | if (__copy_to_user(&buf->_fxsr_env[0], fx, | 464 | if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size)) |
436 | sizeof(struct i387_fxsave_struct))) | ||
437 | return -1; | 465 | return -1; |
438 | return 1; | 466 | return 1; |
439 | } | 467 | } |
440 | 468 | ||
441 | int save_i387_ia32(struct _fpstate_ia32 __user *buf) | 469 | static int save_i387_xsave(void __user *buf) |
470 | { | ||
471 | struct _fpstate_ia32 __user *fx = buf; | ||
472 | int err = 0; | ||
473 | |||
474 | if (save_i387_fxsave(fx) < 0) | ||
475 | return -1; | ||
476 | |||
477 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32, | ||
478 | sizeof(struct _fpx_sw_bytes)); | ||
479 | err |= __put_user(FP_XSTATE_MAGIC2, | ||
480 | (__u32 __user *) (buf + sig_xstate_ia32_size | ||
481 | - FP_XSTATE_MAGIC2_SIZE)); | ||
482 | if (err) | ||
483 | return -1; | ||
484 | |||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | int save_i387_xstate_ia32(void __user *buf) | ||
442 | { | 489 | { |
490 | struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; | ||
491 | struct task_struct *tsk = current; | ||
492 | |||
443 | if (!used_math()) | 493 | if (!used_math()) |
444 | return 0; | 494 | return 0; |
495 | |||
496 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size)) | ||
497 | return -EACCES; | ||
445 | /* | 498 | /* |
446 | * This will cause a "finit" to be triggered by the next | 499 | * This will cause a "finit" to be triggered by the next |
447 | * attempted FPU operation by the 'current' process. | 500 | * attempted FPU operation by the 'current' process. |
@@ -451,13 +504,17 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
451 | if (!HAVE_HWFP) { | 504 | if (!HAVE_HWFP) { |
452 | return fpregs_soft_get(current, NULL, | 505 | return fpregs_soft_get(current, NULL, |
453 | 0, sizeof(struct user_i387_ia32_struct), | 506 | 0, sizeof(struct user_i387_ia32_struct), |
454 | NULL, buf) ? -1 : 1; | 507 | NULL, fp) ? -1 : 1; |
455 | } | 508 | } |
456 | 509 | ||
510 | unlazy_fpu(tsk); | ||
511 | |||
512 | if (cpu_has_xsave) | ||
513 | return save_i387_xsave(fp); | ||
457 | if (cpu_has_fxsr) | 514 | if (cpu_has_fxsr) |
458 | return save_i387_fxsave(buf); | 515 | return save_i387_fxsave(fp); |
459 | else | 516 | else |
460 | return save_i387_fsave(buf); | 517 | return save_i387_fsave(fp); |
461 | } | 518 | } |
462 | 519 | ||
463 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | 520 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) |
@@ -468,14 +525,15 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
468 | sizeof(struct i387_fsave_struct)); | 525 | sizeof(struct i387_fsave_struct)); |
469 | } | 526 | } |
470 | 527 | ||
471 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | 528 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, |
529 | unsigned int size) | ||
472 | { | 530 | { |
473 | struct task_struct *tsk = current; | 531 | struct task_struct *tsk = current; |
474 | struct user_i387_ia32_struct env; | 532 | struct user_i387_ia32_struct env; |
475 | int err; | 533 | int err; |
476 | 534 | ||
477 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], | 535 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], |
478 | sizeof(struct i387_fxsave_struct)); | 536 | size); |
479 | /* mxcsr reserved bits must be masked to zero for security reasons */ | 537 | /* mxcsr reserved bits must be masked to zero for security reasons */ |
480 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 538 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; |
481 | if (err || __copy_from_user(&env, buf, sizeof(env))) | 539 | if (err || __copy_from_user(&env, buf, sizeof(env))) |
@@ -485,14 +543,69 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
485 | return 0; | 543 | return 0; |
486 | } | 544 | } |
487 | 545 | ||
488 | int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | 546 | static int restore_i387_xsave(void __user *buf) |
547 | { | ||
548 | struct _fpx_sw_bytes fx_sw_user; | ||
549 | struct _fpstate_ia32 __user *fx_user = | ||
550 | ((struct _fpstate_ia32 __user *) buf); | ||
551 | struct i387_fxsave_struct __user *fx = | ||
552 | (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; | ||
553 | struct xsave_hdr_struct *xsave_hdr = | ||
554 | &current->thread.xstate->xsave.xsave_hdr; ||
555 | u64 mask; | ||
556 | int err; | ||
557 | |||
558 | if (check_for_xstate(fx, buf, &fx_sw_user)) | ||
559 | goto fx_only; | ||
560 | |||
561 | mask = fx_sw_user.xstate_bv; | ||
562 | |||
563 | err = restore_i387_fxsave(buf, fx_sw_user.xstate_size); | ||
564 | |||
565 | xsave_hdr->xstate_bv &= pcntxt_mask; | ||
566 | /* | ||
567 | * These bits must be zero. | ||
568 | */ | ||
569 | xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; | ||
570 | |||
571 | /* | ||
572 | * Init the state that is not present in the memory layout ||
573 | * but is enabled by the OS. ||
574 | */ | ||
575 | mask = ~(pcntxt_mask & ~mask); | ||
576 | xsave_hdr->xstate_bv &= mask; | ||
577 | |||
578 | return err; | ||
579 | fx_only: | ||
580 | /* | ||
581 | * Couldn't find the extended state information in the memory | ||
582 | * layout. Restore the FP/SSE and init the other extended state | ||
583 | * enabled by the OS. | ||
584 | */ | ||
585 | xsave_hdr->xstate_bv = XSTATE_FPSSE; | ||
586 | return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct)); | ||
587 | } | ||
588 | |||
589 | int restore_i387_xstate_ia32(void __user *buf) | ||
489 | { | 590 | { |
490 | int err; | 591 | int err; |
491 | struct task_struct *tsk = current; | 592 | struct task_struct *tsk = current; |
593 | struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; | ||
492 | 594 | ||
493 | if (HAVE_HWFP) | 595 | if (HAVE_HWFP) |
494 | clear_fpu(tsk); | 596 | clear_fpu(tsk); |
495 | 597 | ||
598 | if (!buf) { | ||
599 | if (used_math()) { | ||
600 | clear_fpu(tsk); | ||
601 | clear_used_math(); | ||
602 | } | ||
603 | |||
604 | return 0; | ||
605 | } else | ||
606 | if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size)) | ||
607 | return -EACCES; | ||
608 | |||
496 | if (!used_math()) { | 609 | if (!used_math()) { |
497 | err = init_fpu(tsk); | 610 | err = init_fpu(tsk); |
498 | if (err) | 611 | if (err) |
@@ -500,14 +613,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
500 | } | 613 | } |
501 | 614 | ||
502 | if (HAVE_HWFP) { | 615 | if (HAVE_HWFP) { |
503 | if (cpu_has_fxsr) | 616 | if (cpu_has_xsave) |
504 | err = restore_i387_fxsave(buf); | 617 | err = restore_i387_xsave(buf); |
618 | else if (cpu_has_fxsr) | ||
619 | err = restore_i387_fxsave(fp, sizeof(struct | ||
620 | i387_fxsave_struct)); | ||
505 | else | 621 | else |
506 | err = restore_i387_fsave(buf); | 622 | err = restore_i387_fsave(fp); |
507 | } else { | 623 | } else { |
508 | err = fpregs_soft_set(current, NULL, | 624 | err = fpregs_soft_set(current, NULL, |
509 | 0, sizeof(struct user_i387_ia32_struct), | 625 | 0, sizeof(struct user_i387_ia32_struct), |
510 | NULL, buf) != 0; | 626 | NULL, fp) != 0; |
511 | } | 627 | } |
512 | set_used_math(); | 628 | set_used_math(); |
513 | 629 | ||
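save_i387_xsave() frames the ia32 signal state as the fxsave image, then fx_sw_reserved_ia32, then an FP_XSTATE_MAGIC2 word at sig_xstate_ia32_size - FP_XSTATE_MAGIC2_SIZE; restore_i387_xsave() only trusts that framing after check_for_xstate() validates it. The user-space sketch below mimics that write-then-verify framing; all sizes, field layouts, and magic values are stand-ins, since the real ones live in xsave.h rather than in this hunk.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Invented stand-ins for the real constants and layout in xsave.h. */
#define EX_MAGIC1        0x46505853u  /* kept in the sw_reserved bytes */
#define EX_MAGIC2        0x46505845u  /* kept at the very end of the frame */
#define EX_FXSAVE_SIZE   512
#define EX_SW_BYTES_SIZE 48
#define EX_FRAME_SIZE    (EX_FXSAVE_SIZE + EX_SW_BYTES_SIZE + 4)

struct ex_sw_bytes {
	uint32_t magic1;
	uint32_t extended_size;  /* total frame size including the trailing magic */
	uint64_t xstate_bv;      /* which states the frame claims to carry */
	uint8_t  pad[EX_SW_BYTES_SIZE - 16];
};

/* "Save": fxsave image (zeroed here), then the sw bytes, then magic2. */
static void ex_save(uint8_t *buf)
{
	struct ex_sw_bytes sw = { EX_MAGIC1, EX_FRAME_SIZE, 0x3 /* FP|SSE */, {0} };
	uint32_t magic2 = EX_MAGIC2;

	memset(buf, 0, EX_FXSAVE_SIZE);
	memcpy(buf + EX_FXSAVE_SIZE, &sw, sizeof(sw));
	memcpy(buf + EX_FRAME_SIZE - 4, &magic2, 4);
}

/* "check_for_xstate": both magics and the claimed size must line up. */
static int ex_check(const uint8_t *buf)
{
	struct ex_sw_bytes sw;
	uint32_t magic2;

	memcpy(&sw, buf + EX_FXSAVE_SIZE, sizeof(sw));
	memcpy(&magic2, buf + EX_FRAME_SIZE - 4, 4);

	if (sw.magic1 != EX_MAGIC1 || sw.extended_size != EX_FRAME_SIZE ||
	    magic2 != EX_MAGIC2)
		return -1;
	return 0;
}

int main(void)
{
	uint8_t frame[EX_FRAME_SIZE];

	ex_save(frame);
	printf("frame %s extended state\n", ex_check(frame) ? "lacks" : "carries");
	return 0;
}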
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index dc92b49d9204..4b8a53d841f7 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -282,6 +282,30 @@ static int __init i8259A_init_sysfs(void) | |||
282 | 282 | ||
283 | device_initcall(i8259A_init_sysfs); | 283 | device_initcall(i8259A_init_sysfs); |
284 | 284 | ||
285 | void mask_8259A(void) | ||
286 | { | ||
287 | unsigned long flags; | ||
288 | |||
289 | spin_lock_irqsave(&i8259A_lock, flags); | ||
290 | |||
291 | outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ | ||
292 | outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ | ||
293 | |||
294 | spin_unlock_irqrestore(&i8259A_lock, flags); | ||
295 | } | ||
296 | |||
297 | void unmask_8259A(void) | ||
298 | { | ||
299 | unsigned long flags; | ||
300 | |||
301 | spin_lock_irqsave(&i8259A_lock, flags); | ||
302 | |||
303 | outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ | ||
304 | outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ | ||
305 | |||
306 | spin_unlock_irqrestore(&i8259A_lock, flags); | ||
307 | } | ||
308 | |||
285 | void init_8259A(int auto_eoi) | 309 | void init_8259A(int auto_eoi) |
286 | { | 310 | { |
287 | unsigned long flags; | 311 | unsigned long flags; |
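mask_8259A()/unmask_8259A() give callers a way to quiesce the legacy PIC around a larger reconfiguration; in this series they are presumably used together with save_mask_IO_APIC_setup()/restore_IO_APIC_setup() when switching the APIC into interrupt-remapped/x2APIC mode. The sketch below shows only that bracketing order with stub functions; the actual enable path lives elsewhere in the series and may differ in detail.

#include <stdio.h>

/* Stubs standing in for the real routines; only the ordering is of interest. */
static void mask_8259A(void)              { puts("mask 8259A"); }
static void unmask_8259A(void)            { puts("unmask 8259A"); }
static int  save_mask_IO_APIC_setup(void) { puts("save + mask IO-APIC RTEs"); return 0; }
static void restore_IO_APIC_setup(void)   { puts("restore saved IO-APIC RTEs"); }
static int  switch_apic_mode(void)        { puts("switch APIC mode"); return 0; }

int main(void)
{
	int err;

	if (save_mask_IO_APIC_setup())
		return 1;                 /* nothing masked yet, bail out early */
	mask_8259A();

	err = switch_apic_mode();         /* the interesting work happens here */

	if (err)
		restore_IO_APIC_setup();  /* roll back to the saved RTEs on failure */
	unmask_8259A();
	return err;
}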
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 09cddb57bec4..e710289f673e 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c | |||
@@ -46,10 +46,13 @@ | |||
46 | #include <asm/nmi.h> | 46 | #include <asm/nmi.h> |
47 | #include <asm/msidef.h> | 47 | #include <asm/msidef.h> |
48 | #include <asm/hypertransport.h> | 48 | #include <asm/hypertransport.h> |
49 | #include <asm/setup.h> | ||
49 | 50 | ||
50 | #include <mach_apic.h> | 51 | #include <mach_apic.h> |
51 | #include <mach_apicdef.h> | 52 | #include <mach_apicdef.h> |
52 | 53 | ||
54 | #define __apicdebuginit(type) static type __init | ||
55 | |||
53 | int (*ioapic_renumber_irq)(int ioapic, int irq); | 56 | int (*ioapic_renumber_irq)(int ioapic, int irq); |
54 | atomic_t irq_mis_count; | 57 | atomic_t irq_mis_count; |
55 | 58 | ||
@@ -1341,7 +1344,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, | |||
1341 | ioapic_write_entry(apic, pin, entry); | 1344 | ioapic_write_entry(apic, pin, entry); |
1342 | } | 1345 | } |
1343 | 1346 | ||
1344 | void __init print_IO_APIC(void) | 1347 | |
1348 | __apicdebuginit(void) print_IO_APIC(void) | ||
1345 | { | 1349 | { |
1346 | int apic, i; | 1350 | int apic, i; |
1347 | union IO_APIC_reg_00 reg_00; | 1351 | union IO_APIC_reg_00 reg_00; |
@@ -1456,9 +1460,7 @@ void __init print_IO_APIC(void) | |||
1456 | return; | 1460 | return; |
1457 | } | 1461 | } |
1458 | 1462 | ||
1459 | #if 0 | 1463 | __apicdebuginit(void) print_APIC_bitfield(int base) |
1460 | |||
1461 | static void print_APIC_bitfield(int base) | ||
1462 | { | 1464 | { |
1463 | unsigned int v; | 1465 | unsigned int v; |
1464 | int i, j; | 1466 | int i, j; |
@@ -1479,9 +1481,10 @@ static void print_APIC_bitfield(int base) | |||
1479 | } | 1481 | } |
1480 | } | 1482 | } |
1481 | 1483 | ||
1482 | void /*__init*/ print_local_APIC(void *dummy) | 1484 | __apicdebuginit(void) print_local_APIC(void *dummy) |
1483 | { | 1485 | { |
1484 | unsigned int v, ver, maxlvt; | 1486 | unsigned int v, ver, maxlvt; |
1487 | u64 icr; | ||
1485 | 1488 | ||
1486 | if (apic_verbosity == APIC_QUIET) | 1489 | if (apic_verbosity == APIC_QUIET) |
1487 | return; | 1490 | return; |
@@ -1490,7 +1493,7 @@ void /*__init*/ print_local_APIC(void *dummy) | |||
1490 | smp_processor_id(), hard_smp_processor_id()); | 1493 | smp_processor_id(), hard_smp_processor_id()); |
1491 | v = apic_read(APIC_ID); | 1494 | v = apic_read(APIC_ID); |
1492 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, | 1495 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, |
1493 | GET_APIC_ID(read_apic_id())); | 1496 | GET_APIC_ID(v)); |
1494 | v = apic_read(APIC_LVR); | 1497 | v = apic_read(APIC_LVR); |
1495 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1498 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1496 | ver = GET_APIC_VERSION(v); | 1499 | ver = GET_APIC_VERSION(v); |
@@ -1532,10 +1535,9 @@ void /*__init*/ print_local_APIC(void *dummy) | |||
1532 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | 1535 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); |
1533 | } | 1536 | } |
1534 | 1537 | ||
1535 | v = apic_read(APIC_ICR); | 1538 | icr = apic_icr_read(); |
1536 | printk(KERN_DEBUG "... APIC ICR: %08x\n", v); | 1539 | printk(KERN_DEBUG "... APIC ICR: %08x\n", icr); |
1537 | v = apic_read(APIC_ICR2); | 1540 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32); |
1538 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", v); | ||
1539 | 1541 | ||
1540 | v = apic_read(APIC_LVTT); | 1542 | v = apic_read(APIC_LVTT); |
1541 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); | 1543 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); |
@@ -1563,12 +1565,12 @@ void /*__init*/ print_local_APIC(void *dummy) | |||
1563 | printk("\n"); | 1565 | printk("\n"); |
1564 | } | 1566 | } |
1565 | 1567 | ||
1566 | void print_all_local_APICs(void) | 1568 | __apicdebuginit(void) print_all_local_APICs(void) |
1567 | { | 1569 | { |
1568 | on_each_cpu(print_local_APIC, NULL, 1); | 1570 | on_each_cpu(print_local_APIC, NULL, 1); |
1569 | } | 1571 | } |
1570 | 1572 | ||
1571 | void /*__init*/ print_PIC(void) | 1573 | __apicdebuginit(void) print_PIC(void) |
1572 | { | 1574 | { |
1573 | unsigned int v; | 1575 | unsigned int v; |
1574 | unsigned long flags; | 1576 | unsigned long flags; |
@@ -1600,7 +1602,17 @@ void /*__init*/ print_PIC(void) | |||
1600 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); | 1602 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); |
1601 | } | 1603 | } |
1602 | 1604 | ||
1603 | #endif /* 0 */ | 1605 | __apicdebuginit(int) print_all_ICs(void) |
1606 | { | ||
1607 | print_PIC(); | ||
1608 | print_all_local_APICs(); | ||
1609 | print_IO_APIC(); | ||
1610 | |||
1611 | return 0; | ||
1612 | } | ||
1613 | |||
1614 | fs_initcall(print_all_ICs); | ||
1615 | |||
1604 | 1616 | ||
1605 | static void __init enable_IO_APIC(void) | 1617 | static void __init enable_IO_APIC(void) |
1606 | { | 1618 | { |
@@ -1698,8 +1710,7 @@ void disable_IO_APIC(void) | |||
1698 | entry.dest_mode = 0; /* Physical */ | 1710 | entry.dest_mode = 0; /* Physical */ |
1699 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1711 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1700 | entry.vector = 0; | 1712 | entry.vector = 0; |
1701 | entry.dest.physical.physical_dest = | 1713 | entry.dest.physical.physical_dest = read_apic_id(); |
1702 | GET_APIC_ID(read_apic_id()); | ||
1703 | 1714 | ||
1704 | /* | 1715 | /* |
1705 | * Add it to the IO-APIC irq-routing table: | 1716 | * Add it to the IO-APIC irq-routing table: |
@@ -1725,10 +1736,8 @@ static void __init setup_ioapic_ids_from_mpc(void) | |||
1725 | unsigned char old_id; | 1736 | unsigned char old_id; |
1726 | unsigned long flags; | 1737 | unsigned long flags; |
1727 | 1738 | ||
1728 | #ifdef CONFIG_X86_NUMAQ | 1739 | if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids()) |
1729 | if (found_numaq) | ||
1730 | return; | 1740 | return; |
1731 | #endif | ||
1732 | 1741 | ||
1733 | /* | 1742 | /* |
1734 | * Don't check I/O APIC IDs for xAPIC systems. They have | 1743 | * Don't check I/O APIC IDs for xAPIC systems. They have |
@@ -2329,8 +2338,6 @@ void __init setup_IO_APIC(void) | |||
2329 | setup_IO_APIC_irqs(); | 2338 | setup_IO_APIC_irqs(); |
2330 | init_IO_APIC_traps(); | 2339 | init_IO_APIC_traps(); |
2331 | check_timer(); | 2340 | check_timer(); |
2332 | if (!acpi_ioapic) | ||
2333 | print_IO_APIC(); | ||
2334 | } | 2341 | } |
2335 | 2342 | ||
2336 | /* | 2343 | /* |
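The __apicdebuginit(type) macro turns the former #if 0 dump helpers into static __init functions, and fs_initcall(print_all_ICs) runs them once during boot (the individual dumpers bail out early when apic_verbosity == APIC_QUIET). Below is a minimal sketch of the same expand-to-static pattern, with __init reduced to a no-op and the initcall replaced by a direct call so it builds outside the kernel; the verbosity check is collapsed into one place here for brevity.

#include <stdio.h>

/* Outside the kernel there is no .init.text section; make __init a no-op. */
#define __init
#define __apicdebuginit(type) static type __init

static int apic_verbosity_quiet;   /* stands in for apic_verbosity == APIC_QUIET */

__apicdebuginit(void) print_PIC(void)             { puts("PIC state"); }
__apicdebuginit(void) print_all_local_APICs(void) { puts("local APIC state"); }
__apicdebuginit(void) print_IO_APIC(void)         { puts("IO-APIC state"); }

__apicdebuginit(int) print_all_ICs(void)
{
	if (apic_verbosity_quiet)
		return 0;              /* the real dumpers make this check themselves */
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();
	return 0;
}

/* Stand-in for fs_initcall(print_all_ICs): call the hook where boot would. */
int main(void)
{
	return print_all_ICs();
}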
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 61a83b70c18f..a1bec2969c6a 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <acpi/acpi_bus.h> | 37 | #include <acpi/acpi_bus.h> |
38 | #endif | 38 | #endif |
39 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
40 | #include <linux/dmar.h> | ||
40 | 41 | ||
41 | #include <asm/idle.h> | 42 | #include <asm/idle.h> |
42 | #include <asm/io.h> | 43 | #include <asm/io.h> |
@@ -49,10 +50,13 @@ | |||
49 | #include <asm/nmi.h> | 50 | #include <asm/nmi.h> |
50 | #include <asm/msidef.h> | 51 | #include <asm/msidef.h> |
51 | #include <asm/hypertransport.h> | 52 | #include <asm/hypertransport.h> |
53 | #include <asm/irq_remapping.h> | ||
52 | 54 | ||
53 | #include <mach_ipi.h> | 55 | #include <mach_ipi.h> |
54 | #include <mach_apic.h> | 56 | #include <mach_apic.h> |
55 | 57 | ||
58 | #define __apicdebuginit(type) static type __init | ||
59 | |||
56 | struct irq_cfg { | 60 | struct irq_cfg { |
57 | cpumask_t domain; | 61 | cpumask_t domain; |
58 | cpumask_t old_domain; | 62 | cpumask_t old_domain; |
@@ -87,8 +91,6 @@ int first_system_vector = 0xfe; | |||
87 | 91 | ||
88 | char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; | 92 | char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; |
89 | 93 | ||
90 | #define __apicdebuginit __init | ||
91 | |||
92 | int sis_apic_bug; /* not actually supported, dummy for compile */ | 94 | int sis_apic_bug; /* not actually supported, dummy for compile */ |
93 | 95 | ||
94 | static int no_timer_check; | 96 | static int no_timer_check; |
@@ -108,6 +110,9 @@ static DEFINE_SPINLOCK(vector_lock); | |||
108 | */ | 110 | */ |
109 | int nr_ioapic_registers[MAX_IO_APICS]; | 111 | int nr_ioapic_registers[MAX_IO_APICS]; |
110 | 112 | ||
113 | /* I/O APIC RTE contents at OS bootup */ ||
114 | struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; | ||
115 | |||
111 | /* I/O APIC entries */ | 116 | /* I/O APIC entries */ |
112 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; | 117 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; |
113 | int nr_ioapics; | 118 | int nr_ioapics; |
@@ -303,7 +308,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
303 | pin = entry->pin; | 308 | pin = entry->pin; |
304 | if (pin == -1) | 309 | if (pin == -1) |
305 | break; | 310 | break; |
306 | io_apic_write(apic, 0x11 + pin*2, dest); | 311 | /* |
312 | * With interrupt-remapping, destination information comes | ||
313 | * from interrupt-remapping table entry. | ||
314 | */ | ||
315 | if (!irq_remapped(irq)) | ||
316 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
307 | reg = io_apic_read(apic, 0x10 + pin*2); | 317 | reg = io_apic_read(apic, 0x10 + pin*2); |
308 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 318 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
309 | reg |= vector; | 319 | reg |= vector; |
@@ -440,6 +450,69 @@ static void clear_IO_APIC (void) | |||
440 | clear_IO_APIC_pin(apic, pin); | 450 | clear_IO_APIC_pin(apic, pin); |
441 | } | 451 | } |
442 | 452 | ||
453 | /* | ||
454 | * Saves and masks all the unmasked IO-APIC RTEs ||
455 | */ | ||
456 | int save_mask_IO_APIC_setup(void) | ||
457 | { | ||
458 | union IO_APIC_reg_01 reg_01; | ||
459 | unsigned long flags; | ||
460 | int apic, pin; | ||
461 | |||
462 | /* | ||
463 | * The number of IO-APIC IRQ registers (== #pins): | ||
464 | */ | ||
465 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
466 | spin_lock_irqsave(&ioapic_lock, flags); | ||
467 | reg_01.raw = io_apic_read(apic, 1); | ||
468 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
469 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | ||
470 | } | ||
471 | |||
472 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
473 | early_ioapic_entries[apic] = | ||
474 | kzalloc(sizeof(struct IO_APIC_route_entry) * | ||
475 | nr_ioapic_registers[apic], GFP_KERNEL); | ||
476 | if (!early_ioapic_entries[apic]) | ||
477 | return -ENOMEM; | ||
478 | } | ||
479 | |||
480 | for (apic = 0; apic < nr_ioapics; apic++) | ||
481 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | ||
482 | struct IO_APIC_route_entry entry; | ||
483 | |||
484 | entry = early_ioapic_entries[apic][pin] = | ||
485 | ioapic_read_entry(apic, pin); | ||
486 | if (!entry.mask) { | ||
487 | entry.mask = 1; | ||
488 | ioapic_write_entry(apic, pin, entry); | ||
489 | } | ||
490 | } | ||
491 | return 0; | ||
492 | } | ||
493 | |||
494 | void restore_IO_APIC_setup(void) | ||
495 | { | ||
496 | int apic, pin; | ||
497 | |||
498 | for (apic = 0; apic < nr_ioapics; apic++) | ||
499 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) | ||
500 | ioapic_write_entry(apic, pin, | ||
501 | early_ioapic_entries[apic][pin]); | ||
502 | } | ||
503 | |||
504 | void reinit_intr_remapped_IO_APIC(int intr_remapping) | ||
505 | { | ||
506 | /* | ||
507 | * for now plain restore of previous settings. | ||
508 | * TBD: In the case of OS enabling interrupt-remapping, | ||
509 | * IO-APIC RTE's need to be setup to point to interrupt-remapping | ||
510 | * table entries. for now, do a plain restore, and wait for | ||
511 | * the setup_IO_APIC_irqs() to do proper initialization. | ||
512 | */ | ||
513 | restore_IO_APIC_setup(); | ||
514 | } | ||
515 | |||
443 | int skip_ioapic_setup; | 516 | int skip_ioapic_setup; |
444 | int ioapic_force; | 517 | int ioapic_force; |
445 | 518 | ||
@@ -839,18 +912,98 @@ void __setup_vector_irq(int cpu) | |||
839 | } | 912 | } |
840 | 913 | ||
841 | static struct irq_chip ioapic_chip; | 914 | static struct irq_chip ioapic_chip; |
915 | #ifdef CONFIG_INTR_REMAP | ||
916 | static struct irq_chip ir_ioapic_chip; | ||
917 | #endif | ||
842 | 918 | ||
843 | static void ioapic_register_intr(int irq, unsigned long trigger) | 919 | static void ioapic_register_intr(int irq, unsigned long trigger) |
844 | { | 920 | { |
845 | if (trigger) { | 921 | if (trigger) |
846 | irq_desc[irq].status |= IRQ_LEVEL; | 922 | irq_desc[irq].status |= IRQ_LEVEL; |
847 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 923 | else |
848 | handle_fasteoi_irq, "fasteoi"); | ||
849 | } else { | ||
850 | irq_desc[irq].status &= ~IRQ_LEVEL; | 924 | irq_desc[irq].status &= ~IRQ_LEVEL; |
925 | |||
926 | #ifdef CONFIG_INTR_REMAP | ||
927 | if (irq_remapped(irq)) { | ||
928 | irq_desc[irq].status |= IRQ_MOVE_PCNTXT; | ||
929 | if (trigger) | ||
930 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
931 | handle_fasteoi_irq, | ||
932 | "fasteoi"); | ||
933 | else | ||
934 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
935 | handle_edge_irq, "edge"); | ||
936 | return; | ||
937 | } | ||
938 | #endif | ||
939 | if (trigger) | ||
940 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
941 | handle_fasteoi_irq, | ||
942 | "fasteoi"); | ||
943 | else | ||
851 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 944 | set_irq_chip_and_handler_name(irq, &ioapic_chip, |
852 | handle_edge_irq, "edge"); | 945 | handle_edge_irq, "edge"); |
946 | } | ||
947 | |||
948 | static int setup_ioapic_entry(int apic, int irq, | ||
949 | struct IO_APIC_route_entry *entry, | ||
950 | unsigned int destination, int trigger, | ||
951 | int polarity, int vector) | ||
952 | { | ||
953 | /* | ||
954 | * add it to the IO-APIC irq-routing table: | ||
955 | */ | ||
956 | memset(entry, 0, sizeof(*entry)); ||
957 | |||
958 | #ifdef CONFIG_INTR_REMAP | ||
959 | if (intr_remapping_enabled) { | ||
960 | struct intel_iommu *iommu = map_ioapic_to_ir(apic); | ||
961 | struct irte irte; | ||
962 | struct IR_IO_APIC_route_entry *ir_entry = | ||
963 | (struct IR_IO_APIC_route_entry *) entry; | ||
964 | int index; | ||
965 | |||
966 | if (!iommu) | ||
967 | panic("No mapping iommu for ioapic %d\n", apic); | ||
968 | |||
969 | index = alloc_irte(iommu, irq, 1); | ||
970 | if (index < 0) | ||
971 | panic("Failed to allocate IRTE for ioapic %d\n", apic); | ||
972 | |||
973 | memset(&irte, 0, sizeof(irte)); | ||
974 | |||
975 | irte.present = 1; | ||
976 | irte.dst_mode = INT_DEST_MODE; | ||
977 | irte.trigger_mode = trigger; | ||
978 | irte.dlvry_mode = INT_DELIVERY_MODE; | ||
979 | irte.vector = vector; | ||
980 | irte.dest_id = IRTE_DEST(destination); | ||
981 | |||
982 | modify_irte(irq, &irte); | ||
983 | |||
984 | ir_entry->index2 = (index >> 15) & 0x1; | ||
985 | ir_entry->zero = 0; | ||
986 | ir_entry->format = 1; | ||
987 | ir_entry->index = (index & 0x7fff); | ||
988 | } else | ||
989 | #endif | ||
990 | { | ||
991 | entry->delivery_mode = INT_DELIVERY_MODE; | ||
992 | entry->dest_mode = INT_DEST_MODE; | ||
993 | entry->dest = destination; | ||
853 | } | 994 | } |
995 | |||
996 | entry->mask = 0; /* enable IRQ */ | ||
997 | entry->trigger = trigger; | ||
998 | entry->polarity = polarity; | ||
999 | entry->vector = vector; | ||
1000 | |||
1001 | /* Mask level triggered irqs. | ||
1002 | * Use IRQ_DELAYED_DISABLE for edge triggered irqs. | ||
1003 | */ | ||
1004 | if (trigger) | ||
1005 | entry->mask = 1; | ||
1006 | return 0; | ||
854 | } | 1007 | } |
855 | 1008 | ||
856 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | 1009 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, |
@@ -875,24 +1028,15 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | |||
875 | apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, | 1028 | apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, |
876 | irq, trigger, polarity); | 1029 | irq, trigger, polarity); |
877 | 1030 | ||
878 | /* | ||
879 | * add it to the IO-APIC irq-routing table: | ||
880 | */ | ||
881 | memset(&entry,0,sizeof(entry)); | ||
882 | |||
883 | entry.delivery_mode = INT_DELIVERY_MODE; | ||
884 | entry.dest_mode = INT_DEST_MODE; | ||
885 | entry.dest = cpu_mask_to_apicid(mask); | ||
886 | entry.mask = 0; /* enable IRQ */ | ||
887 | entry.trigger = trigger; | ||
888 | entry.polarity = polarity; | ||
889 | entry.vector = cfg->vector; | ||
890 | 1031 | ||
891 | /* Mask level triggered irqs. | 1032 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, |
892 | * Use IRQ_DELAYED_DISABLE for edge triggered irqs. | 1033 | cpu_mask_to_apicid(mask), trigger, polarity, |
893 | */ | 1034 | cfg->vector)) { |
894 | if (trigger) | 1035 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
895 | entry.mask = 1; | 1036 | mp_ioapics[apic].mp_apicid, pin); |
1037 | __clear_irq_vector(irq); | ||
1038 | return; | ||
1039 | } | ||
896 | 1040 | ||
897 | ioapic_register_intr(irq, trigger); | 1041 | ioapic_register_intr(irq, trigger); |
898 | if (irq < 16) | 1042 | if (irq < 16) |
@@ -944,6 +1088,9 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, | |||
944 | { | 1088 | { |
945 | struct IO_APIC_route_entry entry; | 1089 | struct IO_APIC_route_entry entry; |
946 | 1090 | ||
1091 | if (intr_remapping_enabled) | ||
1092 | return; | ||
1093 | |||
947 | memset(&entry, 0, sizeof(entry)); | 1094 | memset(&entry, 0, sizeof(entry)); |
948 | 1095 | ||
949 | /* | 1096 | /* |
@@ -970,7 +1117,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, | |||
970 | ioapic_write_entry(apic, pin, entry); | 1117 | ioapic_write_entry(apic, pin, entry); |
971 | } | 1118 | } |
972 | 1119 | ||
973 | void __apicdebuginit print_IO_APIC(void) | 1120 | |
1121 | __apicdebuginit(void) print_IO_APIC(void) | ||
974 | { | 1122 | { |
975 | int apic, i; | 1123 | int apic, i; |
976 | union IO_APIC_reg_00 reg_00; | 1124 | union IO_APIC_reg_00 reg_00; |
@@ -1064,9 +1212,7 @@ void __apicdebuginit print_IO_APIC(void) | |||
1064 | return; | 1212 | return; |
1065 | } | 1213 | } |
1066 | 1214 | ||
1067 | #if 0 | 1215 | __apicdebuginit(void) print_APIC_bitfield(int base) |
1068 | |||
1069 | static __apicdebuginit void print_APIC_bitfield (int base) | ||
1070 | { | 1216 | { |
1071 | unsigned int v; | 1217 | unsigned int v; |
1072 | int i, j; | 1218 | int i, j; |
@@ -1087,9 +1233,10 @@ static __apicdebuginit void print_APIC_bitfield (int base) | |||
1087 | } | 1233 | } |
1088 | } | 1234 | } |
1089 | 1235 | ||
1090 | void __apicdebuginit print_local_APIC(void * dummy) | 1236 | __apicdebuginit(void) print_local_APIC(void *dummy) |
1091 | { | 1237 | { |
1092 | unsigned int v, ver, maxlvt; | 1238 | unsigned int v, ver, maxlvt; |
1239 | unsigned long icr; | ||
1093 | 1240 | ||
1094 | if (apic_verbosity == APIC_QUIET) | 1241 | if (apic_verbosity == APIC_QUIET) |
1095 | return; | 1242 | return; |
@@ -1097,7 +1244,7 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1097 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | 1244 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", |
1098 | smp_processor_id(), hard_smp_processor_id()); | 1245 | smp_processor_id(), hard_smp_processor_id()); |
1099 | v = apic_read(APIC_ID); | 1246 | v = apic_read(APIC_ID); |
1100 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id())); | 1247 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); |
1101 | v = apic_read(APIC_LVR); | 1248 | v = apic_read(APIC_LVR); |
1102 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1249 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1103 | ver = GET_APIC_VERSION(v); | 1250 | ver = GET_APIC_VERSION(v); |
@@ -1133,10 +1280,9 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1133 | v = apic_read(APIC_ESR); | 1280 | v = apic_read(APIC_ESR); |
1134 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | 1281 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); |
1135 | 1282 | ||
1136 | v = apic_read(APIC_ICR); | 1283 | icr = apic_icr_read(); |
1137 | printk(KERN_DEBUG "... APIC ICR: %08x\n", v); | 1284 | printk(KERN_DEBUG "... APIC ICR: %08x\n", icr); |
1138 | v = apic_read(APIC_ICR2); | 1285 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32); |
1139 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", v); | ||
1140 | 1286 | ||
1141 | v = apic_read(APIC_LVTT); | 1287 | v = apic_read(APIC_LVTT); |
1142 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); | 1288 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); |
@@ -1164,12 +1310,12 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1164 | printk("\n"); | 1310 | printk("\n"); |
1165 | } | 1311 | } |
1166 | 1312 | ||
1167 | void print_all_local_APICs (void) | 1313 | __apicdebuginit(void) print_all_local_APICs(void) |
1168 | { | 1314 | { |
1169 | on_each_cpu(print_local_APIC, NULL, 1); | 1315 | on_each_cpu(print_local_APIC, NULL, 1); |
1170 | } | 1316 | } |
1171 | 1317 | ||
1172 | void __apicdebuginit print_PIC(void) | 1318 | __apicdebuginit(void) print_PIC(void) |
1173 | { | 1319 | { |
1174 | unsigned int v; | 1320 | unsigned int v; |
1175 | unsigned long flags; | 1321 | unsigned long flags; |
@@ -1201,7 +1347,17 @@ void __apicdebuginit print_PIC(void) | |||
1201 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); | 1347 | printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); |
1202 | } | 1348 | } |
1203 | 1349 | ||
1204 | #endif /* 0 */ | 1350 | __apicdebuginit(int) print_all_ICs(void) |
1351 | { | ||
1352 | print_PIC(); | ||
1353 | print_all_local_APICs(); | ||
1354 | print_IO_APIC(); | ||
1355 | |||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | fs_initcall(print_all_ICs); | ||
1360 | |||
1205 | 1361 | ||
1206 | void __init enable_IO_APIC(void) | 1362 | void __init enable_IO_APIC(void) |
1207 | { | 1363 | { |
@@ -1291,7 +1447,7 @@ void disable_IO_APIC(void) | |||
1291 | entry.dest_mode = 0; /* Physical */ | 1447 | entry.dest_mode = 0; /* Physical */ |
1292 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1448 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1293 | entry.vector = 0; | 1449 | entry.vector = 0; |
1294 | entry.dest = GET_APIC_ID(read_apic_id()); | 1450 | entry.dest = read_apic_id(); |
1295 | 1451 | ||
1296 | /* | 1452 | /* |
1297 | * Add it to the IO-APIC irq-routing table: | 1453 | * Add it to the IO-APIC irq-routing table: |
@@ -1397,6 +1553,147 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
1397 | */ | 1553 | */ |
1398 | 1554 | ||
1399 | #ifdef CONFIG_SMP | 1555 | #ifdef CONFIG_SMP |
1556 | |||
1557 | #ifdef CONFIG_INTR_REMAP | ||
1558 | static void ir_irq_migration(struct work_struct *work); | ||
1559 | |||
1560 | static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | ||
1561 | |||
1562 | /* | ||
1563 | * Migrate the IO-APIC irq in the presence of intr-remapping. | ||
1564 | * | ||
1565 | * For edge triggered, irq migration is a simple atomic update(of vector | ||
1566 | * and cpu destination) of IRTE and flush the hardware cache. | ||
1567 | * | ||
1568 |  * For level triggered, we need to modify the io-apic RTE as well with the ||
1569 |  * updated vector information, along with modifying the IRTE with vector and ||
1570 |  * destination. So irq migration for level triggered is a little bit more ||
1571 |  * complex compared to edge triggered migration. But the good news is, we use ||
1572 |  * the same algorithm for level triggered migration as we have today, the only ||
1573 |  * difference being that we now initiate the irq migration from process ||
1574 |  * context instead of the interrupt context. ||
1575 | * | ||
1576 |  * In the future, when we do a directed EOI (combined with cpu EOI broadcast ||
1577 |  * suppression) to the IO-APIC, level triggered irq migration will also be ||
1578 |  * as simple as edge triggered migration and we can do the irq migration ||
1579 |  * with a simple atomic update to the IO-APIC RTE. ||
1580 | */ | ||
1581 | static void migrate_ioapic_irq(int irq, cpumask_t mask) | ||
1582 | { | ||
1583 | struct irq_cfg *cfg = irq_cfg + irq; | ||
1584 | struct irq_desc *desc = irq_desc + irq; | ||
1585 | cpumask_t tmp, cleanup_mask; | ||
1586 | struct irte irte; | ||
1587 | int modify_ioapic_rte = desc->status & IRQ_LEVEL; | ||
1588 | unsigned int dest; | ||
1589 | unsigned long flags; | ||
1590 | |||
1591 | cpus_and(tmp, mask, cpu_online_map); | ||
1592 | if (cpus_empty(tmp)) | ||
1593 | return; | ||
1594 | |||
1595 | if (get_irte(irq, &irte)) | ||
1596 | return; | ||
1597 | |||
1598 | if (assign_irq_vector(irq, mask)) | ||
1599 | return; | ||
1600 | |||
1601 | cpus_and(tmp, cfg->domain, mask); | ||
1602 | dest = cpu_mask_to_apicid(tmp); | ||
1603 | |||
1604 | if (modify_ioapic_rte) { | ||
1605 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1606 | __target_IO_APIC_irq(irq, dest, cfg->vector); | ||
1607 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1608 | } | ||
1609 | |||
1610 | irte.vector = cfg->vector; | ||
1611 | irte.dest_id = IRTE_DEST(dest); | ||
1612 | |||
1613 | /* | ||
1614 |  * Modify the IRTE and flush the interrupt entry cache. ||
1615 | */ | ||
1616 | modify_irte(irq, &irte); | ||
1617 | |||
1618 | if (cfg->move_in_progress) { | ||
1619 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | ||
1620 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
1621 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
1622 | cfg->move_in_progress = 0; | ||
1623 | } | ||
1624 | |||
1625 | irq_desc[irq].affinity = mask; | ||
1626 | } | ||
1627 | |||
1628 | static int migrate_irq_remapped_level(int irq) | ||
1629 | { | ||
1630 | int ret = -1; | ||
1631 | |||
1632 | mask_IO_APIC_irq(irq); | ||
1633 | |||
1634 | if (io_apic_level_ack_pending(irq)) { | ||
1635 | /* | ||
1636 | * Interrupt in progress. Migrating irq now will change the | ||
1637 | * vector information in the IO-APIC RTE and that will confuse | ||
1638 |  * the EOI broadcast performed by the cpu. ||
1639 | * So, delay the irq migration to the next instance. | ||
1640 | */ | ||
1641 | schedule_delayed_work(&ir_migration_work, 1); | ||
1642 | goto unmask; | ||
1643 | } | ||
1644 | |||
1645 | /* everything is clear. we have right of way */ ||
1646 | migrate_ioapic_irq(irq, irq_desc[irq].pending_mask); | ||
1647 | |||
1648 | ret = 0; | ||
1649 | irq_desc[irq].status &= ~IRQ_MOVE_PENDING; | ||
1650 | cpus_clear(irq_desc[irq].pending_mask); | ||
1651 | |||
1652 | unmask: | ||
1653 | unmask_IO_APIC_irq(irq); | ||
1654 | return ret; | ||
1655 | } | ||
1656 | |||
1657 | static void ir_irq_migration(struct work_struct *work) | ||
1658 | { | ||
1659 | int irq; | ||
1660 | |||
1661 | for (irq = 0; irq < NR_IRQS; irq++) { | ||
1662 | struct irq_desc *desc = irq_desc + irq; | ||
1663 | if (desc->status & IRQ_MOVE_PENDING) { | ||
1664 | unsigned long flags; | ||
1665 | |||
1666 | spin_lock_irqsave(&desc->lock, flags); | ||
1667 | if (!desc->chip->set_affinity || | ||
1668 | !(desc->status & IRQ_MOVE_PENDING)) { | ||
1669 | desc->status &= ~IRQ_MOVE_PENDING; | ||
1670 | spin_unlock_irqrestore(&desc->lock, flags); | ||
1671 | continue; | ||
1672 | } | ||
1673 | |||
1674 | desc->chip->set_affinity(irq, | ||
1675 | irq_desc[irq].pending_mask); | ||
1676 | spin_unlock_irqrestore(&desc->lock, flags); | ||
1677 | } | ||
1678 | } | ||
1679 | } | ||
1680 | |||
1681 | /* | ||
1682 | * Migrates the IRQ destination in process context. | ||
1683 | */ | ||
1684 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | ||
1685 | { | ||
1686 | if (irq_desc[irq].status & IRQ_LEVEL) { | ||
1687 | irq_desc[irq].status |= IRQ_MOVE_PENDING; | ||
1688 | irq_desc[irq].pending_mask = mask; | ||
1689 | migrate_irq_remapped_level(irq); | ||
1690 | return; | ||
1691 | } | ||
1692 | |||
1693 | migrate_ioapic_irq(irq, mask); | ||
1694 | } | ||
1695 | #endif | ||
1696 | |||
1400 | asmlinkage void smp_irq_move_cleanup_interrupt(void) | 1697 | asmlinkage void smp_irq_move_cleanup_interrupt(void) |
1401 | { | 1698 | { |
1402 | unsigned vector, me; | 1699 | unsigned vector, me; |
@@ -1453,6 +1750,17 @@ static void irq_complete_move(unsigned int irq) | |||
1453 | #else | 1750 | #else |
1454 | static inline void irq_complete_move(unsigned int irq) {} | 1751 | static inline void irq_complete_move(unsigned int irq) {} |
1455 | #endif | 1752 | #endif |
1753 | #ifdef CONFIG_INTR_REMAP | ||
1754 | static void ack_x2apic_level(unsigned int irq) | ||
1755 | { | ||
1756 | ack_x2APIC_irq(); | ||
1757 | } | ||
1758 | |||
1759 | static void ack_x2apic_edge(unsigned int irq) | ||
1760 | { | ||
1761 | ack_x2APIC_irq(); | ||
1762 | } | ||
1763 | #endif | ||
1456 | 1764 | ||
1457 | static void ack_apic_edge(unsigned int irq) | 1765 | static void ack_apic_edge(unsigned int irq) |
1458 | { | 1766 | { |
@@ -1527,6 +1835,21 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
1527 | .retrigger = ioapic_retrigger_irq, | 1835 | .retrigger = ioapic_retrigger_irq, |
1528 | }; | 1836 | }; |
1529 | 1837 | ||
1838 | #ifdef CONFIG_INTR_REMAP | ||
1839 | static struct irq_chip ir_ioapic_chip __read_mostly = { | ||
1840 | .name = "IR-IO-APIC", | ||
1841 | .startup = startup_ioapic_irq, | ||
1842 | .mask = mask_IO_APIC_irq, | ||
1843 | .unmask = unmask_IO_APIC_irq, | ||
1844 | .ack = ack_x2apic_edge, | ||
1845 | .eoi = ack_x2apic_level, | ||
1846 | #ifdef CONFIG_SMP | ||
1847 | .set_affinity = set_ir_ioapic_affinity_irq, | ||
1848 | #endif | ||
1849 | .retrigger = ioapic_retrigger_irq, | ||
1850 | }; | ||
1851 | #endif | ||
1852 | |||
1530 | static inline void init_IO_APIC_traps(void) | 1853 | static inline void init_IO_APIC_traps(void) |
1531 | { | 1854 | { |
1532 | int irq; | 1855 | int irq; |
@@ -1712,6 +2035,8 @@ static inline void __init check_timer(void) | |||
1712 | * 8259A. | 2035 | * 8259A. |
1713 | */ | 2036 | */ |
1714 | if (pin1 == -1) { | 2037 | if (pin1 == -1) { |
2038 | if (intr_remapping_enabled) | ||
2039 | panic("BIOS bug: timer not connected to IO-APIC"); | ||
1715 | pin1 = pin2; | 2040 | pin1 = pin2; |
1716 | apic1 = apic2; | 2041 | apic1 = apic2; |
1717 | no_pin1 = 1; | 2042 | no_pin1 = 1; |
@@ -1738,6 +2063,8 @@ static inline void __init check_timer(void) | |||
1738 | clear_IO_APIC_pin(0, pin1); | 2063 | clear_IO_APIC_pin(0, pin1); |
1739 | goto out; | 2064 | goto out; |
1740 | } | 2065 | } |
2066 | if (intr_remapping_enabled) | ||
2067 | panic("timer doesn't work through Interrupt-remapped IO-APIC"); | ||
1741 | clear_IO_APIC_pin(apic1, pin1); | 2068 | clear_IO_APIC_pin(apic1, pin1); |
1742 | if (!no_pin1) | 2069 | if (!no_pin1) |
1743 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " | 2070 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " |
@@ -1854,8 +2181,6 @@ void __init setup_IO_APIC(void) | |||
1854 | setup_IO_APIC_irqs(); | 2181 | setup_IO_APIC_irqs(); |
1855 | init_IO_APIC_traps(); | 2182 | init_IO_APIC_traps(); |
1856 | check_timer(); | 2183 | check_timer(); |
1857 | if (!acpi_ioapic) | ||
1858 | print_IO_APIC(); | ||
1859 | } | 2184 | } |
1860 | 2185 | ||
1861 | struct sysfs_ioapic_data { | 2186 | struct sysfs_ioapic_data { |
@@ -1977,6 +2302,9 @@ void destroy_irq(unsigned int irq) | |||
1977 | 2302 | ||
1978 | dynamic_irq_cleanup(irq); | 2303 | dynamic_irq_cleanup(irq); |
1979 | 2304 | ||
2305 | #ifdef CONFIG_INTR_REMAP | ||
2306 | free_irte(irq); | ||
2307 | #endif | ||
1980 | spin_lock_irqsave(&vector_lock, flags); | 2308 | spin_lock_irqsave(&vector_lock, flags); |
1981 | __clear_irq_vector(irq); | 2309 | __clear_irq_vector(irq); |
1982 | spin_unlock_irqrestore(&vector_lock, flags); | 2310 | spin_unlock_irqrestore(&vector_lock, flags); |
@@ -1995,11 +2323,42 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
1995 | 2323 | ||
1996 | tmp = TARGET_CPUS; | 2324 | tmp = TARGET_CPUS; |
1997 | err = assign_irq_vector(irq, tmp); | 2325 | err = assign_irq_vector(irq, tmp); |
1998 | if (!err) { | 2326 | if (err) |
1999 | cpus_and(tmp, cfg->domain, tmp); | 2327 | return err; |
2000 | dest = cpu_mask_to_apicid(tmp); | 2328 | |
2329 | cpus_and(tmp, cfg->domain, tmp); | ||
2330 | dest = cpu_mask_to_apicid(tmp); | ||
2331 | |||
2332 | #ifdef CONFIG_INTR_REMAP | ||
2333 | if (irq_remapped(irq)) { | ||
2334 | struct irte irte; | ||
2335 | int ir_index; | ||
2336 | u16 sub_handle; | ||
2337 | |||
2338 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); | ||
2339 | BUG_ON(ir_index == -1); | ||
2340 | |||
2341 | memset (&irte, 0, sizeof(irte)); | ||
2342 | |||
2343 | irte.present = 1; | ||
2344 | irte.dst_mode = INT_DEST_MODE; | ||
2345 | irte.trigger_mode = 0; /* edge */ | ||
2346 | irte.dlvry_mode = INT_DELIVERY_MODE; | ||
2347 | irte.vector = cfg->vector; | ||
2348 | irte.dest_id = IRTE_DEST(dest); | ||
2349 | |||
2350 | modify_irte(irq, &irte); | ||
2001 | 2351 | ||
2002 | msg->address_hi = MSI_ADDR_BASE_HI; | 2352 | msg->address_hi = MSI_ADDR_BASE_HI; |
2353 | msg->data = sub_handle; | ||
2354 | msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | | ||
2355 | MSI_ADDR_IR_SHV | | ||
2356 | MSI_ADDR_IR_INDEX1(ir_index) | | ||
2357 | MSI_ADDR_IR_INDEX2(ir_index); | ||
2358 | } else | ||
2359 | #endif | ||
2360 | { | ||
2361 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
2003 | msg->address_lo = | 2362 | msg->address_lo = |
2004 | MSI_ADDR_BASE_LO | | 2363 | MSI_ADDR_BASE_LO | |
2005 | ((INT_DEST_MODE == 0) ? | 2364 | ((INT_DEST_MODE == 0) ? |
@@ -2049,6 +2408,55 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2049 | write_msi_msg(irq, &msg); | 2408 | write_msi_msg(irq, &msg); |
2050 | irq_desc[irq].affinity = mask; | 2409 | irq_desc[irq].affinity = mask; |
2051 | } | 2410 | } |
2411 | |||
2412 | #ifdef CONFIG_INTR_REMAP | ||
2413 | /* | ||
2414 | * Migrate the MSI irq to another cpumask. This migration is | ||
2415 | * done in process context using interrupt-remapping hardware. | ||
2416 | */ | ||
2417 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | ||
2418 | { | ||
2419 | struct irq_cfg *cfg = irq_cfg + irq; | ||
2420 | unsigned int dest; | ||
2421 | cpumask_t tmp, cleanup_mask; | ||
2422 | struct irte irte; | ||
2423 | |||
2424 | cpus_and(tmp, mask, cpu_online_map); | ||
2425 | if (cpus_empty(tmp)) | ||
2426 | return; | ||
2427 | |||
2428 | if (get_irte(irq, &irte)) | ||
2429 | return; | ||
2430 | |||
2431 | if (assign_irq_vector(irq, mask)) | ||
2432 | return; | ||
2433 | |||
2434 | cpus_and(tmp, cfg->domain, mask); | ||
2435 | dest = cpu_mask_to_apicid(tmp); | ||
2436 | |||
2437 | irte.vector = cfg->vector; | ||
2438 | irte.dest_id = IRTE_DEST(dest); | ||
2439 | |||
2440 | /* | ||
2441 | * atomically update the IRTE with the new destination and vector. | ||
2442 | */ | ||
2443 | modify_irte(irq, &irte); | ||
2444 | |||
2445 | /* | ||
2446 | * After this point, all the interrupts will start arriving | ||
2447 | * at the new destination. So, time to clean up the previous | ||
2448 | * vector allocation. | ||
2449 | */ | ||
2450 | if (cfg->move_in_progress) { | ||
2451 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | ||
2452 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
2453 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2454 | cfg->move_in_progress = 0; | ||
2455 | } | ||
2456 | |||
2457 | irq_desc[irq].affinity = mask; | ||
2458 | } | ||
2459 | #endif | ||
2052 | #endif /* CONFIG_SMP */ | 2460 | #endif /* CONFIG_SMP */ |
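Both the IO-APIC path (migrate_ioapic_irq) and the MSI path (ir_set_msi_irq_affinity) above converge on the same IRTE rewrite. As a rough, illustrative sketch only, reusing names from the hunks above (this is not part of the patch):

    /* Illustrative sketch, not patch content: the IRTE rewrite shared by the
     * two migration paths above.  The IO-APIC path additionally updates the
     * RTE for level triggered interrupts before doing this. */
    static void reprogram_irte_sketch(int irq, struct irq_cfg *cfg, unsigned int dest)
    {
            struct irte irte;

            if (get_irte(irq, &irte))
                    return;

            irte.vector  = cfg->vector;
            irte.dest_id = IRTE_DEST(dest);

            /* update the IRTE and flush the interrupt entry cache */
            modify_irte(irq, &irte);
    }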
2053 | 2461 | ||
2054 | /* | 2462 | /* |
@@ -2066,26 +2474,157 @@ static struct irq_chip msi_chip = { | |||
2066 | .retrigger = ioapic_retrigger_irq, | 2474 | .retrigger = ioapic_retrigger_irq, |
2067 | }; | 2475 | }; |
2068 | 2476 | ||
2069 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | 2477 | #ifdef CONFIG_INTR_REMAP |
2478 | static struct irq_chip msi_ir_chip = { | ||
2479 | .name = "IR-PCI-MSI", | ||
2480 | .unmask = unmask_msi_irq, | ||
2481 | .mask = mask_msi_irq, | ||
2482 | .ack = ack_x2apic_edge, | ||
2483 | #ifdef CONFIG_SMP | ||
2484 | .set_affinity = ir_set_msi_irq_affinity, | ||
2485 | #endif | ||
2486 | .retrigger = ioapic_retrigger_irq, | ||
2487 | }; | ||
2488 | |||
2489 | /* | ||
2490 | * Map the PCI dev to the corresponding remapping hardware unit | ||
2491 | * and allocate 'nvec' consecutive interrupt-remapping table entries | ||
2492 | * in it. | ||
2493 | */ | ||
2494 | static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | ||
2070 | { | 2495 | { |
2496 | struct intel_iommu *iommu; | ||
2497 | int index; | ||
2498 | |||
2499 | iommu = map_dev_to_ir(dev); | ||
2500 | if (!iommu) { | ||
2501 | printk(KERN_ERR | ||
2502 | "Unable to map PCI %s to iommu\n", pci_name(dev)); | ||
2503 | return -ENOENT; | ||
2504 | } | ||
2505 | |||
2506 | index = alloc_irte(iommu, irq, nvec); | ||
2507 | if (index < 0) { | ||
2508 | printk(KERN_ERR | ||
2509 | "Unable to allocate %d IRTE for PCI %s\n", nvec, | ||
2510 | pci_name(dev)); | ||
2511 | return -ENOSPC; | ||
2512 | } | ||
2513 | return index; | ||
2514 | } | ||
2515 | #endif | ||
2516 | |||
2517 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) | ||
2518 | { | ||
2519 | int ret; | ||
2071 | struct msi_msg msg; | 2520 | struct msi_msg msg; |
2521 | |||
2522 | ret = msi_compose_msg(dev, irq, &msg); | ||
2523 | if (ret < 0) | ||
2524 | return ret; | ||
2525 | |||
2526 | set_irq_msi(irq, desc); | ||
2527 | write_msi_msg(irq, &msg); | ||
2528 | |||
2529 | #ifdef CONFIG_INTR_REMAP | ||
2530 | if (irq_remapped(irq)) { | ||
2531 | struct irq_desc *desc = irq_desc + irq; | ||
2532 | /* | ||
2533 | * irq migration in process context | ||
2534 | */ | ||
2535 | desc->status |= IRQ_MOVE_PCNTXT; | ||
2536 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | ||
2537 | } else | ||
2538 | #endif | ||
2539 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | ||
2540 | |||
2541 | return 0; | ||
2542 | } | ||
2543 | |||
2544 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | ||
2545 | { | ||
2072 | int irq, ret; | 2546 | int irq, ret; |
2547 | |||
2073 | irq = create_irq(); | 2548 | irq = create_irq(); |
2074 | if (irq < 0) | 2549 | if (irq < 0) |
2075 | return irq; | 2550 | return irq; |
2076 | 2551 | ||
2077 | ret = msi_compose_msg(dev, irq, &msg); | 2552 | #ifdef CONFIG_INTR_REMAP |
2553 | if (!intr_remapping_enabled) | ||
2554 | goto no_ir; | ||
2555 | |||
2556 | ret = msi_alloc_irte(dev, irq, 1); | ||
2557 | if (ret < 0) | ||
2558 | goto error; | ||
2559 | no_ir: | ||
2560 | #endif | ||
2561 | ret = setup_msi_irq(dev, desc, irq); | ||
2078 | if (ret < 0) { | 2562 | if (ret < 0) { |
2079 | destroy_irq(irq); | 2563 | destroy_irq(irq); |
2080 | return ret; | 2564 | return ret; |
2081 | } | 2565 | } |
2566 | return 0; | ||
2082 | 2567 | ||
2083 | set_irq_msi(irq, desc); | 2568 | #ifdef CONFIG_INTR_REMAP |
2084 | write_msi_msg(irq, &msg); | 2569 | error: |
2570 | destroy_irq(irq); | ||
2571 | return ret; | ||
2572 | #endif | ||
2573 | } | ||
2085 | 2574 | ||
2086 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 2575 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
2576 | { | ||
2577 | int irq, ret, sub_handle; | ||
2578 | struct msi_desc *desc; | ||
2579 | #ifdef CONFIG_INTR_REMAP | ||
2580 | struct intel_iommu *iommu = 0; | ||
2581 | int index = 0; | ||
2582 | #endif | ||
2583 | |||
2584 | sub_handle = 0; | ||
2585 | list_for_each_entry(desc, &dev->msi_list, list) { | ||
2586 | irq = create_irq(); | ||
2587 | if (irq < 0) | ||
2588 | return irq; | ||
2589 | #ifdef CONFIG_INTR_REMAP | ||
2590 | if (!intr_remapping_enabled) | ||
2591 | goto no_ir; | ||
2087 | 2592 | ||
2593 | if (!sub_handle) { | ||
2594 | /* | ||
2595 | * allocate the consecutive block of IRTEs | ||
2596 | * for 'nvec' | ||
2597 | */ | ||
2598 | index = msi_alloc_irte(dev, irq, nvec); | ||
2599 | if (index < 0) { | ||
2600 | ret = index; | ||
2601 | goto error; | ||
2602 | } | ||
2603 | } else { | ||
2604 | iommu = map_dev_to_ir(dev); | ||
2605 | if (!iommu) { | ||
2606 | ret = -ENOENT; | ||
2607 | goto error; | ||
2608 | } | ||
2609 | /* | ||
2610 | * set up the mapping between the irq and the IRTE | ||
2611 | * base index, with the sub_handle pointing to the | ||
2612 | * appropriate interrupt remap table entry. | ||
2613 | */ | ||
2614 | set_irte_irq(irq, iommu, index, sub_handle); | ||
2615 | } | ||
2616 | no_ir: | ||
2617 | #endif | ||
2618 | ret = setup_msi_irq(dev, desc, irq); | ||
2619 | if (ret < 0) | ||
2620 | goto error; | ||
2621 | sub_handle++; | ||
2622 | } | ||
2088 | return 0; | 2623 | return 0; |
2624 | |||
2625 | error: | ||
2626 | destroy_irq(irq); | ||
2627 | return ret; | ||
2089 | } | 2628 | } |
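A note on the multi-message MSI case handled above: only the first vector allocates a contiguous block of 'nvec' IRTEs; the remaining vectors merely record their (index, sub_handle) position, and msi_compose_msg() then places the sub_handle in the MSI data field. A condensed, illustrative restatement, assuming the same helpers (not part of the patch):

    /* Illustrative sketch, not patch content: one IRTE block per device,
     * sub_handle == position of each vector inside that block. */
    static int remapped_msi_block_sketch(struct pci_dev *dev, int *irqs, int nvec)
    {
            struct intel_iommu *iommu = map_dev_to_ir(dev);
            int i, index;

            if (!iommu)
                    return -ENOENT;

            index = msi_alloc_irte(dev, irqs[0], nvec);     /* block of nvec IRTEs */
            if (index < 0)
                    return index;

            for (i = 1; i < nvec; i++)
                    set_irte_irq(irqs[i], iommu, index, i); /* irq -> (index, sub_handle) */

            return 0;
    }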
2090 | 2629 | ||
2091 | void arch_teardown_msi_irq(unsigned int irq) | 2630 | void arch_teardown_msi_irq(unsigned int irq) |
@@ -2333,6 +2872,10 @@ void __init setup_ioapic_dest(void) | |||
2333 | setup_IO_APIC_irq(ioapic, pin, irq, | 2872 | setup_IO_APIC_irq(ioapic, pin, irq, |
2334 | irq_trigger(irq_entry), | 2873 | irq_trigger(irq_entry), |
2335 | irq_polarity(irq_entry)); | 2874 | irq_polarity(irq_entry)); |
2875 | #ifdef CONFIG_INTR_REMAP | ||
2876 | else if (intr_remapping_enabled) | ||
2877 | set_ir_ioapic_affinity_irq(irq, TARGET_CPUS); | ||
2878 | #endif | ||
2336 | else | 2879 | else |
2337 | set_ioapic_affinity_irq(irq, TARGET_CPUS); | 2880 | set_ioapic_affinity_irq(irq, TARGET_CPUS); |
2338 | } | 2881 | } |
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 50e5e4a31c85..191914302744 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | #include <linux/syscalls.h> | 16 | #include <linux/syscalls.h> |
17 | #include <asm/syscalls.h> | ||
17 | 18 | ||
18 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ | 19 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ |
19 | static void set_bitmap(unsigned long *bitmap, unsigned int base, | 20 | static void set_bitmap(unsigned long *bitmap, unsigned int base, |
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 3f7537b669d3..f1c688e46f35 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | #ifdef CONFIG_X86_32 | 21 | #ifdef CONFIG_X86_32 |
22 | #include <mach_apic.h> | 22 | #include <mach_apic.h> |
23 | #include <mach_ipi.h> | ||
24 | |||
23 | /* | 25 | /* |
24 | * the following functions deal with sending IPIs between CPUs. | 26 | * the following functions deal with sending IPIs between CPUs. |
25 | * | 27 | * |
@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
147 | } | 149 | } |
148 | 150 | ||
149 | /* must come after the send_IPI functions above for inlining */ | 151 | /* must come after the send_IPI functions above for inlining */ |
150 | #include <mach_ipi.h> | ||
151 | static int convert_apicid_to_cpu(int apic_id) | 152 | static int convert_apicid_to_cpu(int apic_id) |
152 | { | 153 | { |
153 | int i; | 154 | int i; |
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index d66914287ee1..9200a1e2752d 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c | |||
@@ -74,6 +74,15 @@ void __init init_ISA_irqs (void) | |||
74 | } | 74 | } |
75 | } | 75 | } |
76 | 76 | ||
77 | /* | ||
78 | * IRQ2 is the cascade interrupt to the second interrupt controller | ||
79 | */ | ||
80 | static struct irqaction irq2 = { | ||
81 | .handler = no_action, | ||
82 | .mask = CPU_MASK_NONE, | ||
83 | .name = "cascade", | ||
84 | }; | ||
85 | |||
77 | /* Overridden in paravirt.c */ | 86 | /* Overridden in paravirt.c */ |
78 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | 87 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); |
79 | 88 | ||
@@ -98,6 +107,46 @@ void __init native_init_IRQ(void) | |||
98 | set_intr_gate(vector, interrupt[i]); | 107 | set_intr_gate(vector, interrupt[i]); |
99 | } | 108 | } |
100 | 109 | ||
110 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) | ||
111 | /* | ||
112 | * IRQ0 must be given a fixed assignment and initialized, | ||
113 | * because it's used before the IO-APIC is set up. | ||
114 | */ | ||
115 | set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); | ||
116 | |||
117 | /* | ||
118 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | ||
119 | * IPI, driven by wakeup. | ||
120 | */ | ||
121 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | ||
122 | |||
123 | /* IPI for invalidation */ | ||
124 | alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); | ||
125 | |||
126 | /* IPI for generic function call */ | ||
127 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | ||
128 | |||
129 | /* IPI for single call function */ | ||
130 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); | ||
131 | #endif | ||
132 | |||
133 | #ifdef CONFIG_X86_LOCAL_APIC | ||
134 | /* self generated IPI for local APIC timer */ | ||
135 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | ||
136 | |||
137 | /* IPI vectors for APIC spurious and error interrupts */ | ||
138 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | ||
139 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | ||
140 | #endif | ||
141 | |||
142 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) | ||
143 | /* thermal monitor LVT interrupt */ | ||
144 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | ||
145 | #endif | ||
146 | |||
147 | if (!acpi_ioapic) | ||
148 | setup_irq(2, &irq2); | ||
149 | |||
101 | /* setup after call gates are initialised (usually add in | 150 | /* setup after call gates are initialised (usually add in |
102 | * the architecture specific gates) | 151 | * the architecture specific gates) |
103 | */ | 152 | */ |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index b68e21f06f4f..0ed5f939b905 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/ldt.h> | 18 | #include <asm/ldt.h> |
19 | #include <asm/desc.h> | 19 | #include <asm/desc.h> |
20 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
21 | #include <asm/syscalls.h> | ||
21 | 22 | ||
22 | #ifdef CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
23 | static void flush_ldt(void *current_mm) | 24 | static void flush_ldt(void *current_mm) |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index b3fb430725cb..f98f4e1dba09 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -397,7 +397,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) | |||
397 | generic_bigsmp_probe(); | 397 | generic_bigsmp_probe(); |
398 | #endif | 398 | #endif |
399 | 399 | ||
400 | #ifdef CONFIG_X86_32 | ||
400 | setup_apic_routing(); | 401 | setup_apic_routing(); |
402 | #endif | ||
401 | if (!num_processors) | 403 | if (!num_processors) |
402 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); | 404 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); |
403 | return num_processors; | 405 | return num_processors; |
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index eecc8c18f010..4caff39078e0 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c | |||
@@ -229,6 +229,12 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, | |||
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | static int __init numaq_setup_ioapic_ids(void) | ||
233 | { | ||
234 | /* return 1 so the caller can skip it */ | ||
235 | return 1; | ||
236 | } | ||
237 | |||
232 | static struct x86_quirks numaq_x86_quirks __initdata = { | 238 | static struct x86_quirks numaq_x86_quirks __initdata = { |
233 | .arch_pre_time_init = numaq_pre_time_init, | 239 | .arch_pre_time_init = numaq_pre_time_init, |
234 | .arch_time_init = NULL, | 240 | .arch_time_init = NULL, |
@@ -243,6 +249,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = { | |||
243 | .mpc_oem_bus_info = mpc_oem_bus_info, | 249 | .mpc_oem_bus_info = mpc_oem_bus_info, |
244 | .mpc_oem_pci_bus = mpc_oem_pci_bus, | 250 | .mpc_oem_pci_bus = mpc_oem_pci_bus, |
245 | .smp_read_mpc_oem = smp_read_mpc_oem, | 251 | .smp_read_mpc_oem = smp_read_mpc_oem, |
252 | .setup_ioapic_ids = numaq_setup_ioapic_ids, | ||
246 | }; | 253 | }; |
247 | 254 | ||
248 | void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | 255 | void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 300da17e61cb..6b0bb73998dd 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
330 | #endif | 330 | #endif |
331 | .wbinvd = native_wbinvd, | 331 | .wbinvd = native_wbinvd, |
332 | .read_msr = native_read_msr_safe, | 332 | .read_msr = native_read_msr_safe, |
333 | .read_msr_amd = native_read_msr_amd_safe, | ||
333 | .write_msr = native_write_msr_safe, | 334 | .write_msr = native_write_msr_safe, |
334 | .read_tsc = native_read_tsc, | 335 | .read_tsc = native_read_tsc, |
335 | .read_pmc = native_read_pmc, | 336 | .read_pmc = native_read_pmc, |
@@ -373,8 +374,6 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
373 | 374 | ||
374 | struct pv_apic_ops pv_apic_ops = { | 375 | struct pv_apic_ops pv_apic_ops = { |
375 | #ifdef CONFIG_X86_LOCAL_APIC | 376 | #ifdef CONFIG_X86_LOCAL_APIC |
376 | .apic_write = native_apic_write, | ||
377 | .apic_read = native_apic_read, | ||
378 | .setup_boot_clock = setup_boot_APIC_clock, | 377 | .setup_boot_clock = setup_boot_APIC_clock, |
379 | .setup_secondary_clock = setup_secondary_APIC_clock, | 378 | .setup_secondary_clock = setup_secondary_APIC_clock, |
380 | .startup_ipi_hook = paravirt_nop, | 379 | .startup_ipi_hook = paravirt_nop, |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 3b7a1ddcc0bc..2c9abc95e026 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -55,6 +55,8 @@ | |||
55 | #include <asm/tlbflush.h> | 55 | #include <asm/tlbflush.h> |
56 | #include <asm/cpu.h> | 56 | #include <asm/cpu.h> |
57 | #include <asm/kdebug.h> | 57 | #include <asm/kdebug.h> |
58 | #include <asm/syscalls.h> | ||
59 | #include <asm/smp.h> | ||
58 | 60 | ||
59 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 61 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
60 | 62 | ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71553b664e2a..00263c9e6500 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/proto.h> | 51 | #include <asm/proto.h> |
52 | #include <asm/ia32.h> | 52 | #include <asm/ia32.h> |
53 | #include <asm/idle.h> | 53 | #include <asm/idle.h> |
54 | #include <asm/syscalls.h> | ||
54 | 55 | ||
55 | asmlinkage extern void ret_from_fork(void); | 56 | asmlinkage extern void ret_from_fork(void); |
56 | 57 | ||
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index e37dccce85db..fc3e8dcd9da6 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -69,7 +69,7 @@ static inline bool invalid_selector(u16 value) | |||
69 | 69 | ||
70 | #define FLAG_MASK FLAG_MASK_32 | 70 | #define FLAG_MASK FLAG_MASK_32 |
71 | 71 | ||
72 | static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) | 72 | static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) |
73 | { | 73 | { |
74 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); | 74 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); |
75 | regno >>= 2; | 75 | regno >>= 2; |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 362d4e7f2d38..673f12cf6eb0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -739,6 +739,8 @@ void __init setup_arch(char **cmdline_p) | |||
739 | num_physpages = max_pfn; | 739 | num_physpages = max_pfn; |
740 | 740 | ||
741 | check_efer(); | 741 | check_efer(); |
742 | if (cpu_has_x2apic) | ||
743 | check_x2apic(); | ||
742 | 744 | ||
743 | /* How many end-of-memory variables you have, grandma! */ | 745 | /* How many end-of-memory variables you have, grandma! */ |
744 | /* need this before calling reserve_initrd */ | 746 | /* need this before calling reserve_initrd */ |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 76e305e064f9..0e67f72d9316 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -162,9 +162,16 @@ void __init setup_per_cpu_areas(void) | |||
162 | printk(KERN_INFO | 162 | printk(KERN_INFO |
163 | "cpu %d has no node %d or node-local memory\n", | 163 | "cpu %d has no node %d or node-local memory\n", |
164 | cpu, node); | 164 | cpu, node); |
165 | if (ptr) | ||
166 | printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", | ||
167 | cpu, __pa(ptr)); | ||
165 | } | 168 | } |
166 | else | 169 | else { |
167 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | 170 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); |
171 | if (ptr) | ||
172 | printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", | ||
173 | cpu, node, __pa(ptr)); | ||
174 | } | ||
168 | #endif | 175 | #endif |
169 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 176 | per_cpu_offset(cpu) = ptr - __per_cpu_start; |
170 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 177 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h index 72bbb519d2dc..6dd7e2b70a4b 100644 --- a/arch/x86/kernel/sigframe.h +++ b/arch/x86/kernel/sigframe.h | |||
@@ -3,9 +3,18 @@ struct sigframe { | |||
3 | char __user *pretcode; | 3 | char __user *pretcode; |
4 | int sig; | 4 | int sig; |
5 | struct sigcontext sc; | 5 | struct sigcontext sc; |
6 | struct _fpstate fpstate; | 6 | /* |
7 | * fpstate is unused. fpstate is moved/allocated after | ||
8 | * retcode[] below. This movement allows the FP state and the | ||
9 | * future state extensions (xsave) to stay together. | ||
10 | * At the same time, retaining the unused fpstate prevents changing | ||
11 | * the offset of extramask[] in the sigframe and thus prevents any | ||
12 | * legacy application from accessing/modifying it. | ||
13 | */ | ||
14 | struct _fpstate fpstate_unused; | ||
7 | unsigned long extramask[_NSIG_WORDS-1]; | 15 | unsigned long extramask[_NSIG_WORDS-1]; |
8 | char retcode[8]; | 16 | char retcode[8]; |
17 | /* fp state follows here */ | ||
9 | }; | 18 | }; |
10 | 19 | ||
11 | struct rt_sigframe { | 20 | struct rt_sigframe { |
@@ -15,13 +24,14 @@ struct rt_sigframe { | |||
15 | void __user *puc; | 24 | void __user *puc; |
16 | struct siginfo info; | 25 | struct siginfo info; |
17 | struct ucontext uc; | 26 | struct ucontext uc; |
18 | struct _fpstate fpstate; | ||
19 | char retcode[8]; | 27 | char retcode[8]; |
28 | /* fp state follows here */ | ||
20 | }; | 29 | }; |
21 | #else | 30 | #else |
22 | struct rt_sigframe { | 31 | struct rt_sigframe { |
23 | char __user *pretcode; | 32 | char __user *pretcode; |
24 | struct ucontext uc; | 33 | struct ucontext uc; |
25 | struct siginfo info; | 34 | struct siginfo info; |
35 | /* fp state follows here */ | ||
26 | }; | 36 | }; |
27 | #endif | 37 | #endif |
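The sigframe.h comment above explains why the live FP/xsave state now sits past retcode[]. Purely as an illustration of the resulting stack layout (it mirrors the get_sigframe() change later in this patch and is not itself patch content):

    /* Illustrative sketch, not patch content: reserve the fp/xsave area above
     * the signal frame, the way get_sigframe() does after this change. */
    static unsigned long place_sigframe_sketch(unsigned long sp, size_t frame_size,
                                               size_t xstate_size, void **fpstate)
    {
            if (xstate_size) {              /* i.e. used_math() */
                    sp -= xstate_size;      /* fp/xsave state lands here ... */
                    *fpstate = (void *)sp;
            }
            sp -= frame_size;               /* ... followed by the sigframe itself */
            return sp;                      /* ABI alignment is applied afterwards */
    }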
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 6fb5bcdd8933..8d380b699c0c 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | #include <asm/i387.h> | 27 | #include <asm/i387.h> |
28 | #include <asm/vdso.h> | 28 | #include <asm/vdso.h> |
29 | #include <asm/syscalls.h> | ||
29 | 30 | ||
30 | #include "sigframe.h" | 31 | #include "sigframe.h" |
31 | 32 | ||
@@ -159,28 +160,14 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
159 | } | 160 | } |
160 | 161 | ||
161 | { | 162 | { |
162 | struct _fpstate __user *buf; | 163 | void __user *buf; |
163 | 164 | ||
164 | err |= __get_user(buf, &sc->fpstate); | 165 | err |= __get_user(buf, &sc->fpstate); |
165 | if (buf) { | 166 | err |= restore_i387_xstate(buf); |
166 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
167 | goto badframe; | ||
168 | err |= restore_i387(buf); | ||
169 | } else { | ||
170 | struct task_struct *me = current; | ||
171 | |||
172 | if (used_math()) { | ||
173 | clear_fpu(me); | ||
174 | clear_used_math(); | ||
175 | } | ||
176 | } | ||
177 | } | 167 | } |
178 | 168 | ||
179 | err |= __get_user(*pax, &sc->ax); | 169 | err |= __get_user(*pax, &sc->ax); |
180 | return err; | 170 | return err; |
181 | |||
182 | badframe: | ||
183 | return 1; | ||
184 | } | 171 | } |
185 | 172 | ||
186 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) | 173 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) |
@@ -262,7 +249,7 @@ badframe: | |||
262 | * Set up a signal frame. | 249 | * Set up a signal frame. |
263 | */ | 250 | */ |
264 | static int | 251 | static int |
265 | setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | 252 | setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, |
266 | struct pt_regs *regs, unsigned long mask) | 253 | struct pt_regs *regs, unsigned long mask) |
267 | { | 254 | { |
268 | int tmp, err = 0; | 255 | int tmp, err = 0; |
@@ -289,7 +276,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
289 | err |= __put_user(regs->sp, &sc->sp_at_signal); | 276 | err |= __put_user(regs->sp, &sc->sp_at_signal); |
290 | err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); | 277 | err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); |
291 | 278 | ||
292 | tmp = save_i387(fpstate); | 279 | tmp = save_i387_xstate(fpstate); |
293 | if (tmp < 0) | 280 | if (tmp < 0) |
294 | err = 1; | 281 | err = 1; |
295 | else | 282 | else |
@@ -306,7 +293,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
306 | * Determine which stack to use.. | 293 | * Determine which stack to use.. |
307 | */ | 294 | */ |
308 | static inline void __user * | 295 | static inline void __user * |
309 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 296 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, |
297 | void **fpstate) | ||
310 | { | 298 | { |
311 | unsigned long sp; | 299 | unsigned long sp; |
312 | 300 | ||
@@ -332,6 +320,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
332 | sp = (unsigned long) ka->sa.sa_restorer; | 320 | sp = (unsigned long) ka->sa.sa_restorer; |
333 | } | 321 | } |
334 | 322 | ||
323 | if (used_math()) { | ||
324 | sp = sp - sig_xstate_size; | ||
325 | *fpstate = (struct _fpstate *) sp; | ||
326 | } | ||
327 | |||
335 | sp -= frame_size; | 328 | sp -= frame_size; |
336 | /* | 329 | /* |
337 | * Align the stack pointer according to the i386 ABI, | 330 | * Align the stack pointer according to the i386 ABI, |
@@ -350,8 +343,9 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
350 | void __user *restorer; | 343 | void __user *restorer; |
351 | int err = 0; | 344 | int err = 0; |
352 | int usig; | 345 | int usig; |
346 | void __user *fpstate = NULL; | ||
353 | 347 | ||
354 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 348 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
355 | 349 | ||
356 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 350 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
357 | goto give_sigsegv; | 351 | goto give_sigsegv; |
@@ -366,7 +360,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
366 | if (err) | 360 | if (err) |
367 | goto give_sigsegv; | 361 | goto give_sigsegv; |
368 | 362 | ||
369 | err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); | 363 | err = setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]); |
370 | if (err) | 364 | if (err) |
371 | goto give_sigsegv; | 365 | goto give_sigsegv; |
372 | 366 | ||
@@ -427,8 +421,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
427 | void __user *restorer; | 421 | void __user *restorer; |
428 | int err = 0; | 422 | int err = 0; |
429 | int usig; | 423 | int usig; |
424 | void __user *fpstate = NULL; | ||
430 | 425 | ||
431 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 426 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
432 | 427 | ||
433 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 428 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
434 | goto give_sigsegv; | 429 | goto give_sigsegv; |
@@ -447,13 +442,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
447 | goto give_sigsegv; | 442 | goto give_sigsegv; |
448 | 443 | ||
449 | /* Create the ucontext. */ | 444 | /* Create the ucontext. */ |
450 | err |= __put_user(0, &frame->uc.uc_flags); | 445 | if (cpu_has_xsave) |
446 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
447 | else | ||
448 | err |= __put_user(0, &frame->uc.uc_flags); | ||
451 | err |= __put_user(0, &frame->uc.uc_link); | 449 | err |= __put_user(0, &frame->uc.uc_link); |
452 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 450 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
453 | err |= __put_user(sas_ss_flags(regs->sp), | 451 | err |= __put_user(sas_ss_flags(regs->sp), |
454 | &frame->uc.uc_stack.ss_flags); | 452 | &frame->uc.uc_stack.ss_flags); |
455 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 453 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
456 | err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, | 454 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
457 | regs, set->sig[0]); | 455 | regs, set->sig[0]); |
458 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 456 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
459 | if (err) | 457 | if (err) |
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index ca316b5b742c..4665b598a376 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/proto.h> | 26 | #include <asm/proto.h> |
27 | #include <asm/ia32_unistd.h> | 27 | #include <asm/ia32_unistd.h> |
28 | #include <asm/mce.h> | 28 | #include <asm/mce.h> |
29 | #include <asm/syscalls.h> | ||
29 | #include "sigframe.h" | 30 | #include "sigframe.h" |
30 | 31 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
@@ -54,69 +55,6 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | |||
54 | } | 55 | } |
55 | 56 | ||
56 | /* | 57 | /* |
57 | * Signal frame handlers. | ||
58 | */ | ||
59 | |||
60 | static inline int save_i387(struct _fpstate __user *buf) | ||
61 | { | ||
62 | struct task_struct *tsk = current; | ||
63 | int err = 0; | ||
64 | |||
65 | BUILD_BUG_ON(sizeof(struct user_i387_struct) != | ||
66 | sizeof(tsk->thread.xstate->fxsave)); | ||
67 | |||
68 | if ((unsigned long)buf % 16) | ||
69 | printk("save_i387: bad fpstate %p\n", buf); | ||
70 | |||
71 | if (!used_math()) | ||
72 | return 0; | ||
73 | clear_used_math(); /* trigger finit */ | ||
74 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
75 | err = save_i387_checking((struct i387_fxsave_struct __user *) | ||
76 | buf); | ||
77 | if (err) | ||
78 | return err; | ||
79 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
80 | stts(); | ||
81 | } else { | ||
82 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | ||
83 | sizeof(struct i387_fxsave_struct))) | ||
84 | return -1; | ||
85 | } | ||
86 | return 1; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * This restores directly out of user space. Exceptions are handled. | ||
91 | */ | ||
92 | static inline int restore_i387(struct _fpstate __user *buf) | ||
93 | { | ||
94 | struct task_struct *tsk = current; | ||
95 | int err; | ||
96 | |||
97 | if (!used_math()) { | ||
98 | err = init_fpu(tsk); | ||
99 | if (err) | ||
100 | return err; | ||
101 | } | ||
102 | |||
103 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
104 | clts(); | ||
105 | task_thread_info(current)->status |= TS_USEDFPU; | ||
106 | } | ||
107 | err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf); | ||
108 | if (unlikely(err)) { | ||
109 | /* | ||
110 | * Encountered an error while doing the restore from the | ||
111 | * user buffer, clear the fpu state. | ||
112 | */ | ||
113 | clear_fpu(tsk); | ||
114 | clear_used_math(); | ||
115 | } | ||
116 | return err; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Do a signal return; undo the signal stack. | 58 | * Do a signal return; undo the signal stack. |
121 | */ | 59 | */ |
122 | static int | 60 | static int |
@@ -160,25 +98,11 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
160 | { | 98 | { |
161 | struct _fpstate __user * buf; | 99 | struct _fpstate __user * buf; |
162 | err |= __get_user(buf, &sc->fpstate); | 100 | err |= __get_user(buf, &sc->fpstate); |
163 | 101 | err |= restore_i387_xstate(buf); | |
164 | if (buf) { | ||
165 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
166 | goto badframe; | ||
167 | err |= restore_i387(buf); | ||
168 | } else { | ||
169 | struct task_struct *me = current; | ||
170 | if (used_math()) { | ||
171 | clear_fpu(me); | ||
172 | clear_used_math(); | ||
173 | } | ||
174 | } | ||
175 | } | 102 | } |
176 | 103 | ||
177 | err |= __get_user(*pax, &sc->ax); | 104 | err |= __get_user(*pax, &sc->ax); |
178 | return err; | 105 | return err; |
179 | |||
180 | badframe: | ||
181 | return 1; | ||
182 | } | 106 | } |
183 | 107 | ||
184 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | 108 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) |
@@ -269,26 +193,23 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) | |||
269 | sp = current->sas_ss_sp + current->sas_ss_size; | 193 | sp = current->sas_ss_sp + current->sas_ss_size; |
270 | } | 194 | } |
271 | 195 | ||
272 | return (void __user *)round_down(sp - size, 16); | 196 | return (void __user *)round_down(sp - size, 64); |
273 | } | 197 | } |
274 | 198 | ||
275 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 199 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
276 | sigset_t *set, struct pt_regs * regs) | 200 | sigset_t *set, struct pt_regs * regs) |
277 | { | 201 | { |
278 | struct rt_sigframe __user *frame; | 202 | struct rt_sigframe __user *frame; |
279 | struct _fpstate __user *fp = NULL; | 203 | void __user *fp = NULL; |
280 | int err = 0; | 204 | int err = 0; |
281 | struct task_struct *me = current; | 205 | struct task_struct *me = current; |
282 | 206 | ||
283 | if (used_math()) { | 207 | if (used_math()) { |
284 | fp = get_stack(ka, regs, sizeof(struct _fpstate)); | 208 | fp = get_stack(ka, regs, sig_xstate_size); |
285 | frame = (void __user *)round_down( | 209 | frame = (void __user *)round_down( |
286 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | 210 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; |
287 | 211 | ||
288 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) | 212 | if (save_i387_xstate(fp) < 0) |
289 | goto give_sigsegv; | ||
290 | |||
291 | if (save_i387(fp) < 0) | ||
292 | err |= -1; | 213 | err |= -1; |
293 | } else | 214 | } else |
294 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; | 215 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; |
@@ -303,7 +224,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
303 | } | 224 | } |
304 | 225 | ||
305 | /* Create the ucontext. */ | 226 | /* Create the ucontext. */ |
306 | err |= __put_user(0, &frame->uc.uc_flags); | 227 | if (cpu_has_xsave) |
228 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
229 | else | ||
230 | err |= __put_user(0, &frame->uc.uc_flags); | ||
307 | err |= __put_user(0, &frame->uc.uc_link); | 231 | err |= __put_user(0, &frame->uc.uc_link); |
308 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 232 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
309 | err |= __put_user(sas_ss_flags(regs->sp), | 233 | err |= __put_user(sas_ss_flags(regs->sp), |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7985c5b3f916..aa804c64b167 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -88,7 +88,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | |||
88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | 88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) |
89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | 89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) |
90 | #else | 90 | #else |
91 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | 91 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; |
92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | 92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) |
93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | 93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) |
94 | #endif | 94 | #endif |
@@ -123,13 +123,12 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); | |||
123 | 123 | ||
124 | static atomic_t init_deasserted; | 124 | static atomic_t init_deasserted; |
125 | 125 | ||
126 | static int boot_cpu_logical_apicid; | ||
127 | 126 | ||
128 | /* representing cpus for which sibling maps can be computed */ | 127 | /* representing cpus for which sibling maps can be computed */ |
129 | static cpumask_t cpu_sibling_setup_map; | 128 | static cpumask_t cpu_sibling_setup_map; |
130 | 129 | ||
131 | /* Set if we find a B stepping CPU */ | 130 | /* Set if we find a B stepping CPU */ |
132 | int __cpuinitdata smp_b_stepping; | 131 | static int __cpuinitdata smp_b_stepping; |
133 | 132 | ||
134 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | 133 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
135 | 134 | ||
@@ -165,6 +164,8 @@ static void unmap_cpu_to_node(int cpu) | |||
165 | #endif | 164 | #endif |
166 | 165 | ||
167 | #ifdef CONFIG_X86_32 | 166 | #ifdef CONFIG_X86_32 |
167 | static int boot_cpu_logical_apicid; | ||
168 | |||
168 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = | 169 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = |
169 | { [0 ... NR_CPUS-1] = BAD_APICID }; | 170 | { [0 ... NR_CPUS-1] = BAD_APICID }; |
170 | 171 | ||
@@ -210,7 +211,7 @@ static void __cpuinit smp_callin(void) | |||
210 | /* | 211 | /* |
211 | * (This works even if the APIC is not enabled.) | 212 | * (This works even if the APIC is not enabled.) |
212 | */ | 213 | */ |
213 | phys_id = GET_APIC_ID(read_apic_id()); | 214 | phys_id = read_apic_id(); |
214 | cpuid = smp_processor_id(); | 215 | cpuid = smp_processor_id(); |
215 | if (cpu_isset(cpuid, cpu_callin_map)) { | 216 | if (cpu_isset(cpuid, cpu_callin_map)) { |
216 | panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, | 217 | panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, |
@@ -550,8 +551,7 @@ static inline void __inquire_remote_apic(int apicid) | |||
550 | printk(KERN_CONT | 551 | printk(KERN_CONT |
551 | "a previous APIC delivery may have failed\n"); | 552 | "a previous APIC delivery may have failed\n"); |
552 | 553 | ||
553 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); | 554 | apic_icr_write(APIC_DM_REMRD | regs[i], apicid); |
554 | apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]); | ||
555 | 555 | ||
556 | timeout = 0; | 556 | timeout = 0; |
557 | do { | 557 | do { |
@@ -583,11 +583,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) | |||
583 | int maxlvt; | 583 | int maxlvt; |
584 | 584 | ||
585 | /* Target chip */ | 585 | /* Target chip */ |
586 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid)); | ||
587 | |||
588 | /* Boot on the stack */ | 586 | /* Boot on the stack */ |
589 | /* Kick the second */ | 587 | /* Kick the second */ |
590 | apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); | 588 | apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid); |
591 | 589 | ||
592 | pr_debug("Waiting for send to finish...\n"); | 590 | pr_debug("Waiting for send to finish...\n"); |
593 | send_status = safe_apic_wait_icr_idle(); | 591 | send_status = safe_apic_wait_icr_idle(); |
@@ -640,13 +638,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
640 | /* | 638 | /* |
641 | * Turn INIT on target chip | 639 | * Turn INIT on target chip |
642 | */ | 640 | */ |
643 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
644 | |||
645 | /* | 641 | /* |
646 | * Send IPI | 642 | * Send IPI |
647 | */ | 643 | */ |
648 | apic_write(APIC_ICR, | 644 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, |
649 | APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT); | 645 | phys_apicid); |
650 | 646 | ||
651 | pr_debug("Waiting for send to finish...\n"); | 647 | pr_debug("Waiting for send to finish...\n"); |
652 | send_status = safe_apic_wait_icr_idle(); | 648 | send_status = safe_apic_wait_icr_idle(); |
@@ -656,10 +652,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
656 | pr_debug("Deasserting INIT.\n"); | 652 | pr_debug("Deasserting INIT.\n"); |
657 | 653 | ||
658 | /* Target chip */ | 654 | /* Target chip */ |
659 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
660 | |||
661 | /* Send IPI */ | 655 | /* Send IPI */ |
662 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); | 656 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid); |
663 | 657 | ||
664 | pr_debug("Waiting for send to finish...\n"); | 658 | pr_debug("Waiting for send to finish...\n"); |
665 | send_status = safe_apic_wait_icr_idle(); | 659 | send_status = safe_apic_wait_icr_idle(); |
@@ -702,11 +696,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
702 | */ | 696 | */ |
703 | 697 | ||
704 | /* Target chip */ | 698 | /* Target chip */ |
705 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
706 | |||
707 | /* Boot on the stack */ | 699 | /* Boot on the stack */ |
708 | /* Kick the second */ | 700 | /* Kick the second */ |
709 | apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12)); | 701 | apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), |
702 | phys_apicid); | ||
710 | 703 | ||
711 | /* | 704 | /* |
712 | * Give the other CPU some time to accept the IPI. | 705 | * Give the other CPU some time to accept the IPI. |
@@ -1175,10 +1168,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1175 | * Setup boot CPU information | 1168 | * Setup boot CPU information |
1176 | */ | 1169 | */ |
1177 | smp_store_cpu_info(0); /* Final full version of the data */ | 1170 | smp_store_cpu_info(0); /* Final full version of the data */ |
1171 | #ifdef CONFIG_X86_32 | ||
1178 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1172 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
1173 | #endif | ||
1179 | current_thread_info()->cpu = 0; /* needed? */ | 1174 | current_thread_info()->cpu = 0; /* needed? */ |
1180 | set_cpu_sibling_map(0); | 1175 | set_cpu_sibling_map(0); |
1181 | 1176 | ||
1177 | #ifdef CONFIG_X86_64 | ||
1178 | enable_IR_x2apic(); | ||
1179 | setup_apic_routing(); | ||
1180 | #endif | ||
1181 | |||
1182 | if (smp_sanity_check(max_cpus) < 0) { | 1182 | if (smp_sanity_check(max_cpus) < 0) { |
1183 | printk(KERN_INFO "SMP disabled\n"); | 1183 | printk(KERN_INFO "SMP disabled\n"); |
1184 | disable_smp(); | 1184 | disable_smp(); |
@@ -1186,9 +1186,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1186 | } | 1186 | } |
1187 | 1187 | ||
1188 | preempt_disable(); | 1188 | preempt_disable(); |
1189 | if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { | 1189 | if (read_apic_id() != boot_cpu_physical_apicid) { |
1190 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | 1190 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
1191 | GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); | 1191 | read_apic_id(), boot_cpu_physical_apicid); |
1192 | /* Or can we switch back to PIC here? */ | 1192 | /* Or can we switch back to PIC here? */ |
1193 | } | 1193 | } |
1194 | preempt_enable(); | 1194 | preempt_enable(); |
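Several hunks in this file replace the two-step ICR2/ICR write with apic_icr_write(). On plain xapic hardware that helper is expected to reduce to the same pair of register writes it replaces (illustrative sketch, not patch content); with x2apic enabled it can instead become a single MSR write of the combined destination and command.

    /* Illustrative sketch, not patch content: what the removed sequence did and
     * what the xapic flavour of apic_icr_write() amounts to. */
    static inline void apic_icr_write_sketch(u32 low, u32 dest)
    {
            apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(dest));
            apic_write(APIC_ICR, low);
    }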
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index d67ce5f044ba..7b987852e876 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | #include <asm/bios_ebda.h> | 32 | #include <asm/bios_ebda.h> |
33 | #include <asm/mach-summit/mach_mpparse.h> | 33 | #include <asm/summit/mpparse.h> |
34 | 34 | ||
35 | static struct rio_table_hdr *rio_table_hdr __initdata; | 35 | static struct rio_table_hdr *rio_table_hdr __initdata; |
36 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; | 36 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; |
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 7066cb855a60..1884a8d12bfa 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
24 | 24 | ||
25 | #include <asm/syscalls.h> | ||
26 | |||
25 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | 27 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, |
26 | unsigned long prot, unsigned long flags, | 28 | unsigned long prot, unsigned long flags, |
27 | unsigned long fd, unsigned long pgoff) | 29 | unsigned long fd, unsigned long pgoff) |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 3b360ef33817..c9288c883e20 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | ||
19 | 20 | ||
20 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, | 21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, |
21 | unsigned long fd, unsigned long off) | 22 | unsigned long fd, unsigned long off) |
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c index 170d43c17487..3d1be4f0fac5 100644 --- a/arch/x86/kernel/syscall_64.c +++ b/arch/x86/kernel/syscall_64.c | |||
@@ -8,12 +8,12 @@ | |||
8 | #define __NO_STUBS | 8 | #define __NO_STUBS |
9 | 9 | ||
10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | 10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; |
11 | #undef _ASM_X86_64_UNISTD_H_ | 11 | #undef ASM_X86__UNISTD_64_H |
12 | #include <asm/unistd_64.h> | 12 | #include <asm/unistd_64.h> |
13 | 13 | ||
14 | #undef __SYSCALL | 14 | #undef __SYSCALL |
15 | #define __SYSCALL(nr, sym) [nr] = sym, | 15 | #define __SYSCALL(nr, sym) [nr] = sym, |
16 | #undef _ASM_X86_64_UNISTD_H_ | 16 | #undef ASM_X86__UNISTD_64_H |
17 | 17 | ||
18 | typedef void (*sys_call_ptr_t)(void); | 18 | typedef void (*sys_call_ptr_t)(void); |
19 | 19 | ||
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index ffe3c664afc0..bbecf8b6bf96 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/arch_hooks.h> | 36 | #include <asm/arch_hooks.h> |
37 | #include <asm/hpet.h> | 37 | #include <asm/hpet.h> |
38 | #include <asm/time.h> | 38 | #include <asm/time.h> |
39 | #include <asm/timer.h> | ||
39 | 40 | ||
40 | #include "do_timer.h" | 41 | #include "do_timer.h" |
41 | 42 | ||
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index ab6bf375a307..6bb7b8579e70 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <asm/ldt.h> | 10 | #include <asm/ldt.h> |
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/proto.h> | 12 | #include <asm/proto.h> |
13 | #include <asm/syscalls.h> | ||
13 | 14 | ||
14 | #include "tls.h" | 15 | #include "tls.h" |
15 | 16 | ||
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 03df8e45e5a1..da5a5964fccb 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c | |||
@@ -1228,7 +1228,6 @@ void __init trap_init(void) | |||
1228 | 1228 | ||
1229 | set_bit(SYSCALL_VECTOR, used_vectors); | 1229 | set_bit(SYSCALL_VECTOR, used_vectors); |
1230 | 1230 | ||
1231 | init_thread_xstate(); | ||
1232 | /* | 1231 | /* |
1233 | * Should be a barrier for any external CPU state: | 1232 | * Should be a barrier for any external CPU state: |
1234 | */ | 1233 | */ |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 513caaca7115..b42068fb7b76 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -339,9 +339,8 @@ static void | |||
339 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 339 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
340 | unsigned long *stack, unsigned long bp, char *log_lvl) | 340 | unsigned long *stack, unsigned long bp, char *log_lvl) |
341 | { | 341 | { |
342 | printk("\nCall Trace:\n"); | 342 | printk("Call Trace:\n"); |
343 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); | 343 | dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); |
344 | printk("\n"); | ||
345 | } | 344 | } |
346 | 345 | ||
347 | void show_trace(struct task_struct *task, struct pt_regs *regs, | 346 | void show_trace(struct task_struct *task, struct pt_regs *regs, |
@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
386 | printk(" %016lx", *stack++); | 385 | printk(" %016lx", *stack++); |
387 | touch_nmi_watchdog(); | 386 | touch_nmi_watchdog(); |
388 | } | 387 | } |
388 | printk("\n"); | ||
389 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); | 389 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); |
390 | } | 390 | } |
391 | 391 | ||
@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs) | |||
443 | printk("Stack: "); | 443 | printk("Stack: "); |
444 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, | 444 | show_stack_log_lvl(NULL, regs, (unsigned long *)sp, |
445 | regs->bp, ""); | 445 | regs->bp, ""); |
446 | printk("\n"); | ||
447 | 446 | ||
448 | printk(KERN_EMERG "Code: "); | 447 | printk(KERN_EMERG "Code: "); |
449 | 448 | ||
@@ -1134,7 +1133,7 @@ asmlinkage void math_state_restore(void) | |||
1134 | /* | 1133 | /* |
1135 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | 1134 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. |
1136 | */ | 1135 | */ |
1137 | if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) { | 1136 | if (unlikely(restore_fpu_checking(me))) { |
1138 | stts(); | 1137 | stts(); |
1139 | force_sig(SIGSEGV, me); | 1138 | force_sig(SIGSEGV, me); |
1140 | return; | 1139 | return; |
@@ -1173,10 +1172,6 @@ void __init trap_init(void) | |||
1173 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 1172 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
1174 | #endif | 1173 | #endif |
1175 | /* | 1174 | /* |
1176 | * initialize the per thread extended state: | ||
1177 | */ | ||
1178 | init_thread_xstate(); | ||
1179 | /* | ||
1180 | * Should be a barrier for any external CPU state: | 1175 | * Should be a barrier for any external CPU state: |
1181 | */ | 1176 | */ |
1182 | cpu_init(); | 1177 | cpu_init(); |
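The math_state_restore() hunk above now hands the whole task to restore_fpu_checking() instead of its fxsave area, which implies the helper chooses between xrstor and the legacy fxrstor internally. A hedged sketch of what such a helper could look like; the exact prototype and the xrstor_checking/fxrstor_checking names are assumptions from context, not part of this patch:

	/*
	 * Sketch: restore FPU state for a task.  When the thread is marked
	 * TS_XSAVE, restore from the xsave area; otherwise fall back to
	 * fxrstor on the legacy fxsave image.  Field and helper names are
	 * assumed.
	 */
	static inline int restore_fpu_checking(struct task_struct *tsk)
	{
		if (task_thread_info(tsk)->status & TS_XSAVE)
			return xrstor_checking(&tsk->thread.xstate->xsave);
		return fxrstor_checking(&tsk->thread.xstate->fxsave);
	}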
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 38f566fa27d2..4eeb5cf9720d 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | #include <asm/tlbflush.h> | 47 | #include <asm/tlbflush.h> |
48 | #include <asm/irq.h> | 48 | #include <asm/irq.h> |
49 | #include <asm/syscalls.h> | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * Known problems: | 52 | * Known problems: |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 6ca515d6db54..61531d5c9507 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -905,8 +905,8 @@ static inline int __init activate_vmi(void) | |||
905 | #endif | 905 | #endif |
906 | 906 | ||
907 | #ifdef CONFIG_X86_LOCAL_APIC | 907 | #ifdef CONFIG_X86_LOCAL_APIC |
908 | para_fill(pv_apic_ops.apic_read, APICRead); | 908 | para_fill(apic_ops->read, APICRead); |
909 | para_fill(pv_apic_ops.apic_write, APICWrite); | 909 | para_fill(apic_ops->write, APICWrite); |
910 | #endif | 910 | #endif |
911 | 911 | ||
912 | /* | 912 | /* |
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index af5bdad84604..a9b8560adbc2 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S | |||
@@ -140,10 +140,10 @@ SECTIONS | |||
140 | *(.con_initcall.init) | 140 | *(.con_initcall.init) |
141 | __con_initcall_end = .; | 141 | __con_initcall_end = .; |
142 | } | 142 | } |
143 | .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { | 143 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
144 | __x86cpuvendor_start = .; | 144 | __x86_cpu_dev_start = .; |
145 | *(.x86cpuvendor.init) | 145 | *(.x86_cpu_dev.init) |
146 | __x86cpuvendor_end = .; | 146 | __x86_cpu_dev_end = .; |
147 | } | 147 | } |
148 | SECURITY_INIT | 148 | SECURITY_INIT |
149 | . = ALIGN(4); | 149 | . = ALIGN(4); |
@@ -180,6 +180,7 @@ SECTIONS | |||
180 | . = ALIGN(PAGE_SIZE); | 180 | . = ALIGN(PAGE_SIZE); |
181 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { | 181 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { |
182 | __per_cpu_start = .; | 182 | __per_cpu_start = .; |
183 | *(.data.percpu.page_aligned) | ||
183 | *(.data.percpu) | 184 | *(.data.percpu) |
184 | *(.data.percpu.shared_aligned) | 185 | *(.data.percpu.shared_aligned) |
185 | __per_cpu_end = .; | 186 | __per_cpu_end = .; |
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 63e5c1a22e88..201e81a91a95 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S | |||
@@ -168,13 +168,12 @@ SECTIONS | |||
168 | *(.con_initcall.init) | 168 | *(.con_initcall.init) |
169 | } | 169 | } |
170 | __con_initcall_end = .; | 170 | __con_initcall_end = .; |
171 | . = ALIGN(16); | 171 | __x86_cpu_dev_start = .; |
172 | __x86cpuvendor_start = .; | 172 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
173 | .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { | 173 | *(.x86_cpu_dev.init) |
174 | *(.x86cpuvendor.init) | ||
175 | } | 174 | } |
176 | __x86cpuvendor_end = .; | ||
177 | SECURITY_INIT | 175 | SECURITY_INIT |
176 | __x86_cpu_dev_end = .; | ||
178 | 177 | ||
179 | . = ALIGN(8); | 178 | . = ALIGN(8); |
180 | .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { | 179 | .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { |
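Both linker-script hunks above rename the old .x86cpuvendor.init section to .x86_cpu_dev.init and bracket it with the __x86_cpu_dev_start/__x86_cpu_dev_end symbols. A hedged sketch of how a CPU-vendor descriptor might be placed into that section and walked at boot; the struct layout and the cpu_dev_register macro name are illustrative assumptions, not taken from this patch:

	#include <linux/init.h>
	#include <linux/kernel.h>

	/* Illustrative descriptor; the real struct cpu_dev carries more fields. */
	struct cpu_dev {
		const char	*c_vendor;
		void		(*c_init)(void);
	};

	/* Assumed registration macro: drop a pointer into .x86_cpu_dev.init. */
	#define cpu_dev_register(cpu_devX)					\
		static const struct cpu_dev *const __cpu_dev_##cpu_devX	\
		__attribute__((used, section(".x86_cpu_dev.init"))) = &cpu_devX

	/* Section bounds provided by the linker scripts patched above. */
	extern const struct cpu_dev *const __x86_cpu_dev_start[];
	extern const struct cpu_dev *const __x86_cpu_dev_end[];

	static void __init list_cpu_devs(void)
	{
		const struct cpu_dev *const *cdev;

		for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++)
			printk(KERN_INFO "cpu dev: %s\n", (*cdev)->c_vendor);
	}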
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c new file mode 100644 index 000000000000..07713d64debe --- /dev/null +++ b/arch/x86/kernel/xsave.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * xsave/xrstor support. | ||
3 | * | ||
4 | * Author: Suresh Siddha <suresh.b.siddha@intel.com> | ||
5 | */ | ||
6 | #include <linux/bootmem.h> | ||
7 | #include <linux/compat.h> | ||
8 | #include <asm/i387.h> | ||
9 | #ifdef CONFIG_IA32_EMULATION | ||
10 | #include <asm/sigcontext32.h> | ||
11 | #endif | ||
12 | #include <asm/xcr.h> | ||
13 | |||
14 | /* | ||
15 | * Supported feature mask by the CPU and the kernel. | ||
16 | */ | ||
17 | u64 pcntxt_mask; | ||
18 | |||
19 | struct _fpx_sw_bytes fx_sw_reserved; | ||
20 | #ifdef CONFIG_IA32_EMULATION | ||
21 | struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
22 | #endif | ||
23 | |||
24 | /* | ||
25 | * Check for the presence of extended state information in the | ||
26 | * user fpstate pointer in the sigcontext. | ||
27 | */ | ||
28 | int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
29 | void __user *fpstate, | ||
30 | struct _fpx_sw_bytes *fx_sw_user) | ||
31 | { | ||
32 | int min_xstate_size = sizeof(struct i387_fxsave_struct) + | ||
33 | sizeof(struct xsave_hdr_struct); | ||
34 | unsigned int magic2; | ||
35 | int err; | ||
36 | |||
37 | err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0], | ||
38 | sizeof(struct _fpx_sw_bytes)); | ||
39 | |||
40 | if (err) | ||
41 | return err; | ||
42 | |||
43 | /* | ||
44 | * First Magic check failed. | ||
45 | */ | ||
46 | if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1) | ||
47 | return -1; | ||
48 | |||
49 | /* | ||
50 | * Check for error scenarios. | ||
51 | */ | ||
52 | if (fx_sw_user->xstate_size < min_xstate_size || | ||
53 | fx_sw_user->xstate_size > xstate_size || | ||
54 | fx_sw_user->xstate_size > fx_sw_user->extended_size) | ||
55 | return -1; | ||
56 | |||
57 | err = __get_user(magic2, (__u32 *) (((void *)fpstate) + | ||
58 | fx_sw_user->extended_size - | ||
59 | FP_XSTATE_MAGIC2_SIZE)); | ||
60 | /* | ||
61 | * Check for the presence of the second magic word at the end of the memory | ||
62 | * layout. This detects the case where the user just copied the legacy | ||
63 | * fpstate layout without copying the extended state information | ||
64 | * in the memory layout. | ||
65 | */ | ||
66 | if (err || magic2 != FP_XSTATE_MAGIC2) | ||
67 | return -1; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | #ifdef CONFIG_X86_64 | ||
73 | /* | ||
74 | * Signal frame handlers. | ||
75 | */ | ||
76 | |||
77 | int save_i387_xstate(void __user *buf) | ||
78 | { | ||
79 | struct task_struct *tsk = current; | ||
80 | int err = 0; | ||
81 | |||
82 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size)) | ||
83 | return -EACCES; | ||
84 | |||
85 | BUG_ON(sig_xstate_size < xstate_size); | ||
86 | |||
87 | if ((unsigned long)buf % 64) | ||
88 | printk("save_i387_xstate: bad fpstate %p\n", buf); | ||
89 | |||
90 | if (!used_math()) | ||
91 | return 0; | ||
92 | clear_used_math(); /* trigger finit */ | ||
93 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
94 | /* | ||
95 | * Start with clearing the user buffer. This will present a | ||
96 | * clean context for the bytes not touched by the fxsave/xsave. | ||
97 | */ | ||
98 | __clear_user(buf, sig_xstate_size); | ||
99 | |||
100 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
101 | err = xsave_user(buf); | ||
102 | else | ||
103 | err = fxsave_user(buf); | ||
104 | |||
105 | if (err) | ||
106 | return err; | ||
107 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
108 | stts(); | ||
109 | } else { | ||
110 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | ||
111 | xstate_size)) | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
116 | struct _fpstate __user *fx = buf; | ||
117 | |||
118 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved, | ||
119 | sizeof(struct _fpx_sw_bytes)); | ||
120 | |||
121 | err |= __put_user(FP_XSTATE_MAGIC2, | ||
122 | (__u32 __user *) (buf + sig_xstate_size | ||
123 | - FP_XSTATE_MAGIC2_SIZE)); | ||
124 | } | ||
125 | |||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Restore the extended state if present. Otherwise, restore the FP/SSE | ||
131 | * state. | ||
132 | */ | ||
133 | int restore_user_xstate(void __user *buf) | ||
134 | { | ||
135 | struct _fpx_sw_bytes fx_sw_user; | ||
136 | u64 mask; | ||
137 | int err; | ||
138 | |||
139 | if (((unsigned long)buf % 64) || | ||
140 | check_for_xstate(buf, buf, &fx_sw_user)) | ||
141 | goto fx_only; | ||
142 | |||
143 | mask = fx_sw_user.xstate_bv; | ||
144 | |||
145 | /* | ||
146 | * restore the state passed by the user. | ||
147 | */ | ||
148 | err = xrestore_user(buf, mask); | ||
149 | if (err) | ||
150 | return err; | ||
151 | |||
152 | /* | ||
153 | * init the state skipped by the user. | ||
154 | */ | ||
155 | mask = pcntxt_mask & ~mask; | ||
156 | |||
157 | xrstor_state(init_xstate_buf, mask); | ||
158 | |||
159 | return 0; | ||
160 | |||
161 | fx_only: | ||
162 | /* | ||
163 | * couldn't find the extended state information in the | ||
164 | * memory layout. Restore just the FP/SSE and init all | ||
165 | * the other extended state. | ||
166 | */ | ||
167 | xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); | ||
168 | return fxrstor_checking((__force struct i387_fxsave_struct *)buf); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * This restores directly out of user space. Exceptions are handled. | ||
173 | */ | ||
174 | int restore_i387_xstate(void __user *buf) | ||
175 | { | ||
176 | struct task_struct *tsk = current; | ||
177 | int err = 0; | ||
178 | |||
179 | if (!buf) { | ||
180 | if (used_math()) | ||
181 | goto clear; | ||
182 | return 0; | ||
183 | } else | ||
184 | if (!access_ok(VERIFY_READ, buf, sig_xstate_size)) | ||
185 | return -EACCES; | ||
186 | |||
187 | if (!used_math()) { | ||
188 | err = init_fpu(tsk); | ||
189 | if (err) | ||
190 | return err; | ||
191 | } | ||
192 | |||
193 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
194 | clts(); | ||
195 | task_thread_info(current)->status |= TS_USEDFPU; | ||
196 | } | ||
197 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
198 | err = restore_user_xstate(buf); | ||
199 | else | ||
200 | err = fxrstor_checking((__force struct i387_fxsave_struct *) | ||
201 | buf); | ||
202 | if (unlikely(err)) { | ||
203 | /* | ||
204 | * Encountered an error while doing the restore from the | ||
205 | * user buffer, clear the fpu state. | ||
206 | */ | ||
207 | clear: | ||
208 | clear_fpu(tsk); | ||
209 | clear_used_math(); | ||
210 | } | ||
211 | return err; | ||
212 | } | ||
213 | #endif | ||
214 | |||
215 | /* | ||
216 | * Prepare the SW reserved portion of the fxsave memory layout, indicating | ||
217 | * the presence of the extended state information in the memory layout | ||
218 | * pointed to by the fpstate pointer in the sigcontext. | ||
219 | * This will be saved whenever the FP and extended state context is | ||
220 | * saved on the user stack during the signal handler delivery to the user. | ||
221 | */ | ||
222 | void prepare_fx_sw_frame(void) | ||
223 | { | ||
224 | int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) + | ||
225 | FP_XSTATE_MAGIC2_SIZE; | ||
226 | |||
227 | sig_xstate_size = sizeof(struct _fpstate) + size_extended; | ||
228 | |||
229 | #ifdef CONFIG_IA32_EMULATION | ||
230 | sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended; | ||
231 | #endif | ||
232 | |||
233 | memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved)); | ||
234 | |||
235 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; | ||
236 | fx_sw_reserved.extended_size = sig_xstate_size; | ||
237 | fx_sw_reserved.xstate_bv = pcntxt_mask; | ||
238 | fx_sw_reserved.xstate_size = xstate_size; | ||
239 | #ifdef CONFIG_IA32_EMULATION | ||
240 | memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved, | ||
241 | sizeof(struct _fpx_sw_bytes)); | ||
242 | fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size; | ||
243 | #endif | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * Represents init state for the supported extended state. | ||
248 | */ | ||
249 | struct xsave_struct *init_xstate_buf; | ||
250 | |||
251 | #ifdef CONFIG_X86_64 | ||
252 | unsigned int sig_xstate_size = sizeof(struct _fpstate); | ||
253 | #endif | ||
254 | |||
255 | /* | ||
256 | * Enable the extended processor state save/restore feature | ||
257 | */ | ||
258 | void __cpuinit xsave_init(void) | ||
259 | { | ||
260 | if (!cpu_has_xsave) | ||
261 | return; | ||
262 | |||
263 | set_in_cr4(X86_CR4_OSXSAVE); | ||
264 | |||
265 | /* | ||
266 | * Enable all the features that the HW is capable of | ||
267 | * and the Linux kernel is aware of. | ||
268 | */ | ||
269 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * setup the xstate image representing the init state | ||
274 | */ | ||
275 | void setup_xstate_init(void) | ||
276 | { | ||
277 | init_xstate_buf = alloc_bootmem(xstate_size); | ||
278 | init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * Enable and initialize the xsave feature. | ||
283 | */ | ||
284 | void __init xsave_cntxt_init(void) | ||
285 | { | ||
286 | unsigned int eax, ebx, ecx, edx; | ||
287 | |||
288 | cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); | ||
289 | pcntxt_mask = eax + ((u64)edx << 32); | ||
290 | |||
291 | if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { | ||
292 | printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n", | ||
293 | pcntxt_mask); | ||
294 | BUG(); | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * for now the OS knows only about FP/SSE | ||
299 | */ | ||
300 | pcntxt_mask = pcntxt_mask & XCNTXT_MASK; | ||
301 | xsave_init(); | ||
302 | |||
303 | /* | ||
304 | * Recompute the context size for enabled features | ||
305 | */ | ||
306 | cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); | ||
307 | xstate_size = ebx; | ||
308 | |||
309 | prepare_fx_sw_frame(); | ||
310 | |||
311 | setup_xstate_init(); | ||
312 | |||
313 | printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, " | ||
314 | "cntxt size 0x%x\n", | ||
315 | pcntxt_mask, xstate_size); | ||
316 | } | ||
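prepare_fx_sw_frame() above describes the extended signal frame: the software-reserved bytes of the fxsave area carry FP_XSTATE_MAGIC1 plus the frame sizes, and save_i387_xstate() appends FP_XSTATE_MAGIC2 at the very end of the frame. A minimal user-space-flavoured sketch of the same check that check_for_xstate() performs, useful for seeing the layout; the struct is simplified and the magic values are assumed to match asm/sigcontext.h:

	#include <stdint.h>
	#include <string.h>

	#define FP_XSTATE_MAGIC1	0x46505853U	/* assumed value */
	#define FP_XSTATE_MAGIC2	0x46505845U	/* assumed value */
	#define FP_XSTATE_MAGIC2_SIZE	sizeof(uint32_t)

	struct fpx_sw_bytes {			/* simplified _fpx_sw_bytes */
		uint32_t magic1;		/* FP_XSTATE_MAGIC1 if extended */
		uint32_t extended_size;		/* whole frame incl. MAGIC2 */
		uint64_t xstate_bv;		/* features saved in the frame */
		uint32_t xstate_size;		/* fxsave + xsave header + states */
	};

	/* Return 1 if fpstate carries extended state, 0 if it is legacy-only. */
	static int frame_has_xstate(const void *fpstate, const struct fpx_sw_bytes *sw)
	{
		uint32_t magic2;

		if (sw->magic1 != FP_XSTATE_MAGIC1)
			return 0;
		memcpy(&magic2, (const char *)fpstate + sw->extended_size -
				FP_XSTATE_MAGIC2_SIZE, sizeof(magic2));
		return magic2 == FP_XSTATE_MAGIC2;
	}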
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index d9249a882aa5..65f0b8a47bed 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/lguest_launcher.h> | 55 | #include <linux/lguest_launcher.h> |
56 | #include <linux/virtio_console.h> | 56 | #include <linux/virtio_console.h> |
57 | #include <linux/pm.h> | 57 | #include <linux/pm.h> |
58 | #include <asm/apic.h> | ||
58 | #include <asm/lguest.h> | 59 | #include <asm/lguest.h> |
59 | #include <asm/paravirt.h> | 60 | #include <asm/paravirt.h> |
60 | #include <asm/param.h> | 61 | #include <asm/param.h> |
@@ -783,14 +784,44 @@ static void lguest_wbinvd(void) | |||
783 | * code qualifies for Advanced. It will also never interrupt anything. It | 784 | * code qualifies for Advanced. It will also never interrupt anything. It |
784 | * does, however, allow us to get through the Linux boot code. */ | 785 | * does, however, allow us to get through the Linux boot code. */ |
785 | #ifdef CONFIG_X86_LOCAL_APIC | 786 | #ifdef CONFIG_X86_LOCAL_APIC |
786 | static void lguest_apic_write(unsigned long reg, u32 v) | 787 | static void lguest_apic_write(u32 reg, u32 v) |
787 | { | 788 | { |
788 | } | 789 | } |
789 | 790 | ||
790 | static u32 lguest_apic_read(unsigned long reg) | 791 | static u32 lguest_apic_read(u32 reg) |
791 | { | 792 | { |
792 | return 0; | 793 | return 0; |
793 | } | 794 | } |
795 | |||
796 | static u64 lguest_apic_icr_read(void) | ||
797 | { | ||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | static void lguest_apic_icr_write(u32 low, u32 id) | ||
802 | { | ||
803 | /* Warn to see if there are any stray references */ | ||
804 | WARN_ON(1); | ||
805 | } | ||
806 | |||
807 | static void lguest_apic_wait_icr_idle(void) | ||
808 | { | ||
809 | return; | ||
810 | } | ||
811 | |||
812 | static u32 lguest_apic_safe_wait_icr_idle(void) | ||
813 | { | ||
814 | return 0; | ||
815 | } | ||
816 | |||
817 | static struct apic_ops lguest_basic_apic_ops = { | ||
818 | .read = lguest_apic_read, | ||
819 | .write = lguest_apic_write, | ||
820 | .icr_read = lguest_apic_icr_read, | ||
821 | .icr_write = lguest_apic_icr_write, | ||
822 | .wait_icr_idle = lguest_apic_wait_icr_idle, | ||
823 | .safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle, | ||
824 | }; | ||
794 | #endif | 825 | #endif |
795 | 826 | ||
796 | /* STOP! Until an interrupt comes in. */ | 827 | /* STOP! Until an interrupt comes in. */ |
@@ -990,8 +1021,7 @@ __init void lguest_init(void) | |||
990 | 1021 | ||
991 | #ifdef CONFIG_X86_LOCAL_APIC | 1022 | #ifdef CONFIG_X86_LOCAL_APIC |
992 | /* apic read/write intercepts */ | 1023 | /* apic read/write intercepts */ |
993 | pv_apic_ops.apic_write = lguest_apic_write; | 1024 | apic_ops = &lguest_basic_apic_ops; |
994 | pv_apic_ops.apic_read = lguest_apic_read; | ||
995 | #endif | 1025 | #endif |
996 | 1026 | ||
997 | /* time operations */ | 1027 | /* time operations */ |
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index aa3fa4119424..55e11aa6d66c 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -17,9 +17,6 @@ ifeq ($(CONFIG_X86_32),y) | |||
17 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o | 17 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o |
18 | else | 18 | else |
19 | obj-y += io_64.o iomap_copy_64.o | 19 | obj-y += io_64.o iomap_copy_64.o |
20 | |||
21 | CFLAGS_csum-partial_64.o := -funroll-loops | ||
22 | |||
23 | lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o | 20 | lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o |
24 | lib-y += thunk_64.o clear_page_64.o copy_page_64.o | 21 | lib-y += thunk_64.o clear_page_64.o copy_page_64.o |
25 | lib-y += memmove_64.o memset_64.o | 22 | lib-y += memmove_64.o memset_64.o |
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 24e60944971a..9e68075544f6 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -14,6 +14,13 @@ | |||
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | #include <asm/mmx.h> | 15 | #include <asm/mmx.h> |
16 | 16 | ||
17 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
18 | /* | ||
19 | * Alignment at which movsl is preferred for bulk memory copies. | ||
20 | */ | ||
21 | struct movsl_mask movsl_mask __read_mostly; | ||
22 | #endif | ||
23 | |||
17 | static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) | 24 | static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) |
18 | { | 25 | { |
19 | #ifdef CONFIG_X86_INTEL_USERCOPY | 26 | #ifdef CONFIG_X86_INTEL_USERCOPY |
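The hunk above moves the movsl_mask definition into usercopy_32.c; it records the alignment at which rep movsl copies stop being worthwhile on Intel CPUs, and __movsl_is_ok() in the same file consults it. A rough sketch of the idea (the kernel's exact predicate may differ), assuming a mask of alignment-minus-one, e.g. 7 for 8-byte alignment:

	/* Sketch: are both pointers and the length aligned well enough for
	 * a rep-movsl bulk copy?  mask == 0 means "don't care". */
	struct movsl_mask {
		int mask;
	};

	static struct movsl_mask movsl_mask = { .mask = 7 };

	static inline int movsl_is_ok(unsigned long a1, unsigned long a2,
				      unsigned long n)
	{
		if ((a1 | a2 | n) & movsl_mask.mask)
			return 0;	/* misaligned: take the unrolled path */
		return 1;
	}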
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 3d317836be9e..37b9ae4d44c5 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c | |||
@@ -10,13 +10,15 @@ | |||
10 | #include <asm/e820.h> | 10 | #include <asm/e820.h> |
11 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
12 | 12 | ||
13 | #include <mach_ipi.h> | ||
14 | |||
13 | #ifdef CONFIG_HOTPLUG_CPU | 15 | #ifdef CONFIG_HOTPLUG_CPU |
14 | #define DEFAULT_SEND_IPI (1) | 16 | #define DEFAULT_SEND_IPI (1) |
15 | #else | 17 | #else |
16 | #define DEFAULT_SEND_IPI (0) | 18 | #define DEFAULT_SEND_IPI (0) |
17 | #endif | 19 | #endif |
18 | 20 | ||
19 | int no_broadcast=DEFAULT_SEND_IPI; | 21 | int no_broadcast = DEFAULT_SEND_IPI; |
20 | 22 | ||
21 | /** | 23 | /** |
22 | * pre_intr_init_hook - initialisation prior to setting up interrupt vectors | 24 | * pre_intr_init_hook - initialisation prior to setting up interrupt vectors |
@@ -36,15 +38,6 @@ void __init pre_intr_init_hook(void) | |||
36 | init_ISA_irqs(); | 38 | init_ISA_irqs(); |
37 | } | 39 | } |
38 | 40 | ||
39 | /* | ||
40 | * IRQ2 is cascade interrupt to second interrupt controller | ||
41 | */ | ||
42 | static struct irqaction irq2 = { | ||
43 | .handler = no_action, | ||
44 | .mask = CPU_MASK_NONE, | ||
45 | .name = "cascade", | ||
46 | }; | ||
47 | |||
48 | /** | 41 | /** |
49 | * intr_init_hook - post gate setup interrupt initialisation | 42 | * intr_init_hook - post gate setup interrupt initialisation |
50 | * | 43 | * |
@@ -60,12 +53,6 @@ void __init intr_init_hook(void) | |||
60 | if (x86_quirks->arch_intr_init()) | 53 | if (x86_quirks->arch_intr_init()) |
61 | return; | 54 | return; |
62 | } | 55 | } |
63 | #ifdef CONFIG_X86_LOCAL_APIC | ||
64 | apic_intr_init(); | ||
65 | #endif | ||
66 | |||
67 | if (!acpi_ioapic) | ||
68 | setup_irq(2, &irq2); | ||
69 | } | 56 | } |
70 | 57 | ||
71 | /** | 58 | /** |
diff --git a/arch/x86/mach-es7000/Makefile b/arch/x86/mach-es7000/Makefile deleted file mode 100644 index 3ef8b43b62fc..000000000000 --- a/arch/x86/mach-es7000/Makefile +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the linux kernel. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_X86_ES7000) := es7000plat.o | ||
diff --git a/arch/x86/mach-es7000/es7000.h b/arch/x86/mach-es7000/es7000.h deleted file mode 100644 index c8d5aa132fa0..000000000000 --- a/arch/x86/mach-es7000/es7000.h +++ /dev/null | |||
@@ -1,114 +0,0 @@ | |||
1 | /* | ||
2 | * Written by: Garry Forsgren, Unisys Corporation | ||
3 | * Natalie Protasevich, Unisys Corporation | ||
4 | * This file contains the code to configure and interface | ||
5 | * with Unisys ES7000 series hardware system manager. | ||
6 | * | ||
7 | * Copyright (c) 2003 Unisys Corporation. All Rights Reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of version 2 of the GNU General Public License as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it would be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
20 | * | ||
21 | * Contact information: Unisys Corporation, Township Line & Union Meeting | ||
22 | * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or: | ||
23 | * | ||
24 | * http://www.unisys.com | ||
25 | */ | ||
26 | |||
27 | /* | ||
28 | * ES7000 chipsets | ||
29 | */ | ||
30 | |||
31 | #define NON_UNISYS 0 | ||
32 | #define ES7000_CLASSIC 1 | ||
33 | #define ES7000_ZORRO 2 | ||
34 | |||
35 | |||
36 | #define MIP_REG 1 | ||
37 | #define MIP_PSAI_REG 4 | ||
38 | |||
39 | #define MIP_BUSY 1 | ||
40 | #define MIP_SPIN 0xf0000 | ||
41 | #define MIP_VALID 0x0100000000000000ULL | ||
42 | #define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff) | ||
43 | |||
44 | #define MIP_RD_LO(VALUE) (VALUE & 0xffffffff) | ||
45 | |||
46 | struct mip_reg_info { | ||
47 | unsigned long long mip_info; | ||
48 | unsigned long long delivery_info; | ||
49 | unsigned long long host_reg; | ||
50 | unsigned long long mip_reg; | ||
51 | }; | ||
52 | |||
53 | struct part_info { | ||
54 | unsigned char type; | ||
55 | unsigned char length; | ||
56 | unsigned char part_id; | ||
57 | unsigned char apic_mode; | ||
58 | unsigned long snum; | ||
59 | char ptype[16]; | ||
60 | char sname[64]; | ||
61 | char pname[64]; | ||
62 | }; | ||
63 | |||
64 | struct psai { | ||
65 | unsigned long long entry_type; | ||
66 | unsigned long long addr; | ||
67 | unsigned long long bep_addr; | ||
68 | }; | ||
69 | |||
70 | struct es7000_mem_info { | ||
71 | unsigned char type; | ||
72 | unsigned char length; | ||
73 | unsigned char resv[6]; | ||
74 | unsigned long long start; | ||
75 | unsigned long long size; | ||
76 | }; | ||
77 | |||
78 | struct es7000_oem_table { | ||
79 | unsigned long long hdr; | ||
80 | struct mip_reg_info mip; | ||
81 | struct part_info pif; | ||
82 | struct es7000_mem_info shm; | ||
83 | struct psai psai; | ||
84 | }; | ||
85 | |||
86 | #ifdef CONFIG_ACPI | ||
87 | |||
88 | struct oem_table { | ||
89 | struct acpi_table_header Header; | ||
90 | u32 OEMTableAddr; | ||
91 | u32 OEMTableSize; | ||
92 | }; | ||
93 | |||
94 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | ||
95 | #endif | ||
96 | |||
97 | struct mip_reg { | ||
98 | unsigned long long off_0; | ||
99 | unsigned long long off_8; | ||
100 | unsigned long long off_10; | ||
101 | unsigned long long off_18; | ||
102 | unsigned long long off_20; | ||
103 | unsigned long long off_28; | ||
104 | unsigned long long off_30; | ||
105 | unsigned long long off_38; | ||
106 | }; | ||
107 | |||
108 | #define MIP_SW_APIC 0x1020b | ||
109 | #define MIP_FUNC(VALUE) (VALUE & 0xff) | ||
110 | |||
111 | extern int parse_unisys_oem (char *oemptr); | ||
112 | extern void setup_unisys(void); | ||
113 | extern int es7000_start_cpu(int cpu, unsigned long eip); | ||
114 | extern void es7000_sw_apic(void); | ||
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 0dbd7803a1d5..6730f4e7c744 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile | |||
@@ -9,4 +9,3 @@ obj-$(CONFIG_X86_NUMAQ) += numaq.o | |||
9 | obj-$(CONFIG_X86_SUMMIT) += summit.o | 9 | obj-$(CONFIG_X86_SUMMIT) += summit.o |
10 | obj-$(CONFIG_X86_BIGSMP) += bigsmp.o | 10 | obj-$(CONFIG_X86_BIGSMP) += bigsmp.o |
11 | obj-$(CONFIG_X86_ES7000) += es7000.o | 11 | obj-$(CONFIG_X86_ES7000) += es7000.o |
12 | obj-$(CONFIG_X86_ES7000) += ../../x86/mach-es7000/ | ||
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 59d771714559..df37fc9d6a26 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c | |||
@@ -5,18 +5,17 @@ | |||
5 | #define APIC_DEFINITION 1 | 5 | #define APIC_DEFINITION 1 |
6 | #include <linux/threads.h> | 6 | #include <linux/threads.h> |
7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | #include <asm/smp.h> | ||
9 | #include <asm/mpspec.h> | 8 | #include <asm/mpspec.h> |
10 | #include <asm/genapic.h> | 9 | #include <asm/genapic.h> |
11 | #include <asm/fixmap.h> | 10 | #include <asm/fixmap.h> |
12 | #include <asm/apicdef.h> | 11 | #include <asm/apicdef.h> |
13 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
14 | #include <linux/smp.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <linux/dmi.h> | 14 | #include <linux/dmi.h> |
17 | #include <asm/mach-bigsmp/mach_apic.h> | 15 | #include <asm/bigsmp/apicdef.h> |
18 | #include <asm/mach-bigsmp/mach_apicdef.h> | 16 | #include <linux/smp.h> |
19 | #include <asm/mach-bigsmp/mach_ipi.h> | 17 | #include <asm/bigsmp/apic.h> |
18 | #include <asm/bigsmp/ipi.h> | ||
20 | #include <asm/mach-default/mach_mpparse.h> | 19 | #include <asm/mach-default/mach_mpparse.h> |
21 | 20 | ||
22 | static int dmi_bigsmp; /* can be set by dmi scanners */ | 21 | static int dmi_bigsmp; /* can be set by dmi scanners */ |
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 4742626f08c4..520cca0ee04e 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c | |||
@@ -4,20 +4,19 @@ | |||
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
7 | #include <asm/smp.h> | ||
8 | #include <asm/mpspec.h> | 7 | #include <asm/mpspec.h> |
9 | #include <asm/genapic.h> | 8 | #include <asm/genapic.h> |
10 | #include <asm/fixmap.h> | 9 | #include <asm/fixmap.h> |
11 | #include <asm/apicdef.h> | 10 | #include <asm/apicdef.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/smp.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <asm/mach-es7000/mach_apicdef.h> | 14 | #include <asm/es7000/apicdef.h> |
17 | #include <asm/mach-es7000/mach_apic.h> | 15 | #include <linux/smp.h> |
18 | #include <asm/mach-es7000/mach_ipi.h> | 16 | #include <asm/es7000/apic.h> |
19 | #include <asm/mach-es7000/mach_mpparse.h> | 17 | #include <asm/es7000/ipi.h> |
20 | #include <asm/mach-es7000/mach_wakecpu.h> | 18 | #include <asm/es7000/mpparse.h> |
19 | #include <asm/es7000/wakecpu.h> | ||
21 | 20 | ||
22 | static int probe_es7000(void) | 21 | static int probe_es7000(void) |
23 | { | 22 | { |
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 8091e68764c4..8cf58394975e 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c | |||
@@ -4,7 +4,6 @@ | |||
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
7 | #include <linux/smp.h> | ||
8 | #include <asm/mpspec.h> | 7 | #include <asm/mpspec.h> |
9 | #include <asm/genapic.h> | 8 | #include <asm/genapic.h> |
10 | #include <asm/fixmap.h> | 9 | #include <asm/fixmap.h> |
@@ -12,11 +11,12 @@ | |||
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
15 | #include <asm/mach-numaq/mach_apic.h> | 14 | #include <asm/numaq/apicdef.h> |
16 | #include <asm/mach-numaq/mach_apicdef.h> | 15 | #include <linux/smp.h> |
17 | #include <asm/mach-numaq/mach_ipi.h> | 16 | #include <asm/numaq/apic.h> |
18 | #include <asm/mach-numaq/mach_mpparse.h> | 17 | #include <asm/numaq/ipi.h> |
19 | #include <asm/mach-numaq/mach_wakecpu.h> | 18 | #include <asm/numaq/mpparse.h> |
19 | #include <asm/numaq/wakecpu.h> | ||
20 | #include <asm/numaq.h> | 20 | #include <asm/numaq.h> |
21 | 21 | ||
22 | static int mps_oem_check(struct mp_config_table *mpc, char *oem, | 22 | static int mps_oem_check(struct mp_config_table *mpc, char *oem, |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index a97ea0f35b1e..6ad6b67a723d 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c | |||
@@ -4,19 +4,18 @@ | |||
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
7 | #include <asm/smp.h> | ||
8 | #include <asm/mpspec.h> | 7 | #include <asm/mpspec.h> |
9 | #include <asm/genapic.h> | 8 | #include <asm/genapic.h> |
10 | #include <asm/fixmap.h> | 9 | #include <asm/fixmap.h> |
11 | #include <asm/apicdef.h> | 10 | #include <asm/apicdef.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/smp.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <asm/mach-summit/mach_apic.h> | 14 | #include <asm/summit/apicdef.h> |
17 | #include <asm/mach-summit/mach_apicdef.h> | 15 | #include <linux/smp.h> |
18 | #include <asm/mach-summit/mach_ipi.h> | 16 | #include <asm/summit/apic.h> |
19 | #include <asm/mach-summit/mach_mpparse.h> | 17 | #include <asm/summit/ipi.h> |
18 | #include <asm/summit/mpparse.h> | ||
20 | 19 | ||
21 | static int probe_summit(void) | 20 | static int probe_summit(void) |
22 | { | 21 | { |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 455f3fe67b42..8f92cac4e6db 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
36 | #include <asm/proto.h> | 36 | #include <asm/proto.h> |
37 | #include <asm-generic/sections.h> | 37 | #include <asm-generic/sections.h> |
38 | #include <asm/traps.h> | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * Page fault error code bits | 41 | * Page fault error code bits |
@@ -357,8 +358,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address) | |||
357 | return 0; | 358 | return 0; |
358 | } | 359 | } |
359 | 360 | ||
360 | void do_invalid_op(struct pt_regs *, unsigned long); | ||
361 | |||
362 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) | 361 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
363 | { | 362 | { |
364 | #ifdef CONFIG_X86_F00F_BUG | 363 | #ifdef CONFIG_X86_F00F_BUG |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index d37f29376b0c..4974e97dedfe 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/paravirt.h> | 47 | #include <asm/paravirt.h> |
48 | #include <asm/setup.h> | 48 | #include <asm/setup.h> |
49 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
50 | #include <asm/smp.h> | ||
50 | 51 | ||
51 | unsigned int __VMALLOC_RESERVE = 128 << 20; | 52 | unsigned int __VMALLOC_RESERVE = 128 << 20; |
52 | 53 | ||
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index d4b6e6a29ae3..cac6da54203b 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -421,7 +421,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr) | |||
421 | return; | 421 | return; |
422 | } | 422 | } |
423 | 423 | ||
424 | int __initdata early_ioremap_debug; | 424 | static int __initdata early_ioremap_debug; |
425 | 425 | ||
426 | static int __init early_ioremap_debug_setup(char *str) | 426 | static int __init early_ioremap_debug_setup(char *str) |
427 | { | 427 | { |
@@ -547,7 +547,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | 549 | ||
550 | int __initdata early_ioremap_nested; | 550 | static int __initdata early_ioremap_nested; |
551 | 551 | ||
552 | static int __init check_early_ioremap_leak(void) | 552 | static int __init check_early_ioremap_leak(void) |
553 | { | 553 | { |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 19af06927fbc..1d88d2b39771 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -250,10 +250,5 @@ int __init pci_acpi_init(void) | |||
250 | acpi_pci_irq_enable(dev); | 250 | acpi_pci_irq_enable(dev); |
251 | } | 251 | } |
252 | 252 | ||
253 | #ifdef CONFIG_X86_IO_APIC | ||
254 | if (acpi_ioapic) | ||
255 | print_IO_APIC(); | ||
256 | #endif | ||
257 | |||
258 | return 0; | 253 | return 0; |
259 | } | 254 | } |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 8791fc55e715..844df0cbbd3e 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | 34 | ||
35 | #include <asm/pat.h> | 35 | #include <asm/pat.h> |
36 | #include <asm/e820.h> | ||
36 | 37 | ||
37 | #include "pci.h" | 38 | #include "pci.h" |
38 | 39 | ||
@@ -227,6 +228,8 @@ void __init pcibios_resource_survey(void) | |||
227 | pcibios_allocate_bus_resources(&pci_root_buses); | 228 | pcibios_allocate_bus_resources(&pci_root_buses); |
228 | pcibios_allocate_resources(0); | 229 | pcibios_allocate_resources(0); |
229 | pcibios_allocate_resources(1); | 230 | pcibios_allocate_resources(1); |
231 | |||
232 | e820_reserve_resources_late(); | ||
230 | } | 233 | } |
231 | 234 | ||
232 | /** | 235 | /** |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index d9635764ce3d..654a2234f8f3 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -209,7 +209,7 @@ static int __init pci_mmcfg_check_hostbridge(void) | |||
209 | return name != NULL; | 209 | return name != NULL; |
210 | } | 210 | } |
211 | 211 | ||
212 | static void __init pci_mmcfg_insert_resources(unsigned long resource_flags) | 212 | static void __init pci_mmcfg_insert_resources(void) |
213 | { | 213 | { |
214 | #define PCI_MMCFG_RESOURCE_NAME_LEN 19 | 214 | #define PCI_MMCFG_RESOURCE_NAME_LEN 19 |
215 | int i; | 215 | int i; |
@@ -233,7 +233,7 @@ static void __init pci_mmcfg_insert_resources(unsigned long resource_flags) | |||
233 | cfg->pci_segment); | 233 | cfg->pci_segment); |
234 | res->start = cfg->address; | 234 | res->start = cfg->address; |
235 | res->end = res->start + (num_buses << 20) - 1; | 235 | res->end = res->start + (num_buses << 20) - 1; |
236 | res->flags = IORESOURCE_MEM | resource_flags; | 236 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
237 | insert_resource(&iomem_resource, res); | 237 | insert_resource(&iomem_resource, res); |
238 | names += PCI_MMCFG_RESOURCE_NAME_LEN; | 238 | names += PCI_MMCFG_RESOURCE_NAME_LEN; |
239 | } | 239 | } |
@@ -434,11 +434,9 @@ static void __init __pci_mmcfg_init(int early) | |||
434 | (pci_mmcfg_config[0].address == 0)) | 434 | (pci_mmcfg_config[0].address == 0)) |
435 | return; | 435 | return; |
436 | 436 | ||
437 | if (pci_mmcfg_arch_init()) { | 437 | if (pci_mmcfg_arch_init()) |
438 | if (known_bridge) | ||
439 | pci_mmcfg_insert_resources(IORESOURCE_BUSY); | ||
440 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; | 438 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; |
441 | } else { | 439 | else { |
442 | /* | 440 | /* |
443 | * Signal not to attempt to insert mmcfg resources because | 441 | * Signal not to attempt to insert mmcfg resources because |
444 | * the architecture mmcfg setup could not initialize. | 442 | * the architecture mmcfg setup could not initialize. |
@@ -475,7 +473,7 @@ static int __init pci_mmcfg_late_insert_resources(void) | |||
475 | * marked so it won't cause request errors when __request_region is | 473 | * marked so it won't cause request errors when __request_region is |
476 | * called. | 474 | * called. |
477 | */ | 475 | */ |
478 | pci_mmcfg_insert_resources(0); | 476 | pci_mmcfg_insert_resources(); |
479 | 477 | ||
480 | return 0; | 478 | return 0; |
481 | } | 479 | } |
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index d3e083dea720..274d06082f48 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/suspend.h> | 11 | #include <linux/suspend.h> |
12 | #include <asm/mtrr.h> | 12 | #include <asm/mtrr.h> |
13 | #include <asm/mce.h> | 13 | #include <asm/mce.h> |
14 | #include <asm/xcr.h> | ||
14 | 15 | ||
15 | static struct saved_context saved_context; | 16 | static struct saved_context saved_context; |
16 | 17 | ||
@@ -126,6 +127,12 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
126 | if (boot_cpu_has(X86_FEATURE_SEP)) | 127 | if (boot_cpu_has(X86_FEATURE_SEP)) |
127 | enable_sep_cpu(); | 128 | enable_sep_cpu(); |
128 | 129 | ||
130 | /* | ||
131 | * restore XCR0 for xsave capable cpu's. | ||
132 | */ | ||
133 | if (cpu_has_xsave) | ||
134 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
135 | |||
129 | fix_processor_context(); | 136 | fix_processor_context(); |
130 | do_fpu_end(); | 137 | do_fpu_end(); |
131 | mtrr_ap_init(); | 138 | mtrr_ap_init(); |
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 66bdfb591fd8..e3b6cf70d62c 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
16 | #include <asm/mtrr.h> | 16 | #include <asm/mtrr.h> |
17 | #include <asm/xcr.h> | ||
17 | 18 | ||
18 | static void fix_processor_context(void); | 19 | static void fix_processor_context(void); |
19 | 20 | ||
@@ -122,6 +123,12 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
122 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); | 123 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); |
123 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); | 124 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); |
124 | 125 | ||
126 | /* | ||
127 | * restore XCR0 for xsave-capable CPUs. | ||
128 | */ | ||
129 | if (cpu_has_xsave) | ||
130 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
131 | |||
125 | fix_processor_context(); | 132 | fix_processor_context(); |
126 | 133 | ||
127 | do_fpu_end(); | 134 | do_fpu_end(); |
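Both resume paths above re-program XCR0 with xsetbv() because the extended-control register does not survive suspend. xsetbv() is presumably a thin wrapper around the XSETBV instruction; a hedged sketch, assuming the usual ECX-index/EDX:EAX-value convention and XCR0 at index 0:

	#include <linux/types.h>

	#define XCR_XFEATURE_ENABLED_MASK	0	/* XCR0; assumed index */

	/* Sketch of xsetbv(): write a 64-bit mask into the extended control
	 * register selected by 'index'.  Emitted as raw bytes in case the
	 * assembler predates the XSETBV mnemonic. */
	static inline void xsetbv(u32 index, u64 value)
	{
		u32 eax = (u32)value;
		u32 edx = (u32)(value >> 32);

		asm volatile(".byte 0x0f,0x01,0xd1"	/* xsetbv */
			     : : "a" (eax), "d" (edx), "c" (index));
	}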
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 9ff6e3cbf08f..8d28925ebed9 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <xen/hvc-console.h> | 36 | #include <xen/hvc-console.h> |
37 | 37 | ||
38 | #include <asm/paravirt.h> | 38 | #include <asm/paravirt.h> |
39 | #include <asm/apic.h> | ||
39 | #include <asm/page.h> | 40 | #include <asm/page.h> |
40 | #include <asm/xen/hypercall.h> | 41 | #include <asm/xen/hypercall.h> |
41 | #include <asm/xen/hypervisor.h> | 42 | #include <asm/xen/hypervisor.h> |
@@ -580,16 +581,47 @@ static void xen_io_delay(void) | |||
580 | } | 581 | } |
581 | 582 | ||
582 | #ifdef CONFIG_X86_LOCAL_APIC | 583 | #ifdef CONFIG_X86_LOCAL_APIC |
583 | static u32 xen_apic_read(unsigned long reg) | 584 | static u32 xen_apic_read(u32 reg) |
584 | { | 585 | { |
585 | return 0; | 586 | return 0; |
586 | } | 587 | } |
587 | 588 | ||
588 | static void xen_apic_write(unsigned long reg, u32 val) | 589 | static void xen_apic_write(u32 reg, u32 val) |
589 | { | 590 | { |
590 | /* Warn to see if there's any stray references */ | 591 | /* Warn to see if there's any stray references */ |
591 | WARN_ON(1); | 592 | WARN_ON(1); |
592 | } | 593 | } |
594 | |||
595 | static u64 xen_apic_icr_read(void) | ||
596 | { | ||
597 | return 0; | ||
598 | } | ||
599 | |||
600 | static void xen_apic_icr_write(u32 low, u32 id) | ||
601 | { | ||
602 | /* Warn to see if there are any stray references */ | ||
603 | WARN_ON(1); | ||
604 | } | ||
605 | |||
606 | static void xen_apic_wait_icr_idle(void) | ||
607 | { | ||
608 | return; | ||
609 | } | ||
610 | |||
611 | static u32 xen_safe_apic_wait_icr_idle(void) | ||
612 | { | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static struct apic_ops xen_basic_apic_ops = { | ||
617 | .read = xen_apic_read, | ||
618 | .write = xen_apic_write, | ||
619 | .icr_read = xen_apic_icr_read, | ||
620 | .icr_write = xen_apic_icr_write, | ||
621 | .wait_icr_idle = xen_apic_wait_icr_idle, | ||
622 | .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle, | ||
623 | }; | ||
624 | |||
593 | #endif | 625 | #endif |
594 | 626 | ||
595 | static void xen_flush_tlb(void) | 627 | static void xen_flush_tlb(void) |
@@ -1273,8 +1305,6 @@ static const struct pv_irq_ops xen_irq_ops __initdata = { | |||
1273 | 1305 | ||
1274 | static const struct pv_apic_ops xen_apic_ops __initdata = { | 1306 | static const struct pv_apic_ops xen_apic_ops __initdata = { |
1275 | #ifdef CONFIG_X86_LOCAL_APIC | 1307 | #ifdef CONFIG_X86_LOCAL_APIC |
1276 | .apic_write = xen_apic_write, | ||
1277 | .apic_read = xen_apic_read, | ||
1278 | .setup_boot_clock = paravirt_nop, | 1308 | .setup_boot_clock = paravirt_nop, |
1279 | .setup_secondary_clock = paravirt_nop, | 1309 | .setup_secondary_clock = paravirt_nop, |
1280 | .startup_ipi_hook = paravirt_nop, | 1310 | .startup_ipi_hook = paravirt_nop, |
@@ -1677,6 +1707,13 @@ asmlinkage void __init xen_start_kernel(void) | |||
1677 | pv_apic_ops = xen_apic_ops; | 1707 | pv_apic_ops = xen_apic_ops; |
1678 | pv_mmu_ops = xen_mmu_ops; | 1708 | pv_mmu_ops = xen_mmu_ops; |
1679 | 1709 | ||
1710 | #ifdef CONFIG_X86_LOCAL_APIC | ||
1711 | /* | ||
1712 | * set up the basic apic ops. | ||
1713 | */ | ||
1714 | apic_ops = &xen_basic_apic_ops; | ||
1715 | #endif | ||
1716 | |||
1680 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { | 1717 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { |
1681 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; | 1718 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; |
1682 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; | 1719 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 7d63f8ced24b..4b47f4ece5b7 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -26,6 +26,8 @@ obj-$(CONFIG_HT_IRQ) += htirq.o | |||
26 | # Build Intel IOMMU support | 26 | # Build Intel IOMMU support |
27 | obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o | 27 | obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o |
28 | 28 | ||
29 | obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o | ||
30 | |||
29 | # | 31 | # |
30 | # Some architectures use the generic PCI setup functions | 32 | # Some architectures use the generic PCI setup functions |
31 | # | 33 | # |
diff --git a/drivers/pci/dma_remapping.h b/drivers/pci/dma_remapping.h new file mode 100644 index 000000000000..bff5c65f81dc --- /dev/null +++ b/drivers/pci/dma_remapping.h | |||
@@ -0,0 +1,157 @@ | |||
1 | #ifndef _DMA_REMAPPING_H | ||
2 | #define _DMA_REMAPPING_H | ||
3 | |||
4 | /* | ||
5 | * We need a fixed PAGE_SIZE of 4K irrespective of | ||
6 | * arch PAGE_SIZE for IOMMU page tables. | ||
7 | */ | ||
8 | #define PAGE_SHIFT_4K (12) | ||
9 | #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) | ||
10 | #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) | ||
11 | #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) | ||
12 | |||
13 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) | ||
14 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) | ||
15 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) | ||
16 | |||
17 | |||
18 | /* | ||
19 | * 0: Present | ||
20 | * 1-11: Reserved | ||
21 | * 12-63: Context Ptr (12 - (haw-1)) | ||
22 | * 64-127: Reserved | ||
23 | */ | ||
24 | struct root_entry { | ||
25 | u64 val; | ||
26 | u64 rsvd1; | ||
27 | }; | ||
28 | #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) | ||
29 | static inline bool root_present(struct root_entry *root) | ||
30 | { | ||
31 | return (root->val & 1); | ||
32 | } | ||
33 | static inline void set_root_present(struct root_entry *root) | ||
34 | { | ||
35 | root->val |= 1; | ||
36 | } | ||
37 | static inline void set_root_value(struct root_entry *root, unsigned long value) | ||
38 | { | ||
39 | root->val |= value & PAGE_MASK_4K; | ||
40 | } | ||
41 | |||
42 | struct context_entry; | ||
43 | static inline struct context_entry * | ||
44 | get_context_addr_from_root(struct root_entry *root) | ||
45 | { | ||
46 | return (struct context_entry *) | ||
47 | (root_present(root)?phys_to_virt( | ||
48 | root->val & PAGE_MASK_4K): | ||
49 | NULL); | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * low 64 bits: | ||
54 | * 0: present | ||
55 | * 1: fault processing disable | ||
56 | * 2-3: translation type | ||
57 | * 12-63: address space root | ||
58 | * high 64 bits: | ||
59 | * 0-2: address width | ||
60 | * 3-6: aval | ||
61 | * 8-23: domain id | ||
62 | */ | ||
63 | struct context_entry { | ||
64 | u64 lo; | ||
65 | u64 hi; | ||
66 | }; | ||
67 | #define context_present(c) ((c).lo & 1) | ||
68 | #define context_fault_disable(c) (((c).lo >> 1) & 1) | ||
69 | #define context_translation_type(c) (((c).lo >> 2) & 3) | ||
70 | #define context_address_root(c) ((c).lo & PAGE_MASK_4K) | ||
71 | #define context_address_width(c) ((c).hi & 7) | ||
72 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) | ||
73 | |||
74 | #define context_set_present(c) do {(c).lo |= 1;} while (0) | ||
75 | #define context_set_fault_enable(c) \ | ||
76 | do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) | ||
77 | #define context_set_translation_type(c, val) \ | ||
78 | do { \ | ||
79 | (c).lo &= (((u64)-1) << 4) | 3; \ | ||
80 | (c).lo |= ((val) & 3) << 2; \ | ||
81 | } while (0) | ||
82 | #define CONTEXT_TT_MULTI_LEVEL 0 | ||
83 | #define context_set_address_root(c, val) \ | ||
84 | do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) | ||
85 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) | ||
86 | #define context_set_domain_id(c, val) \ | ||
87 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) | ||
88 | #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) | ||
89 | |||
90 | /* | ||
91 | * 0: readable | ||
92 | * 1: writable | ||
93 | * 2-6: reserved | ||
94 | * 7: super page | ||
95 | * 8-11: available | ||
96 | * 12-63: Host physical address | ||
97 | */ | ||
98 | struct dma_pte { | ||
99 | u64 val; | ||
100 | }; | ||
101 | #define dma_clear_pte(p) do {(p).val = 0;} while (0) | ||
102 | |||
103 | #define DMA_PTE_READ (1) | ||
104 | #define DMA_PTE_WRITE (2) | ||
105 | |||
106 | #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) | ||
107 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) | ||
108 | #define dma_set_pte_prot(p, prot) \ | ||
109 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) | ||
110 | #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) | ||
111 | #define dma_set_pte_addr(p, addr) do {\ | ||
112 | (p).val |= ((addr) & PAGE_MASK_4K); } while (0) | ||
113 | #define dma_pte_present(p) (((p).val & 3) != 0) | ||
114 | |||
115 | struct intel_iommu; | ||
116 | |||
117 | struct dmar_domain { | ||
118 | int id; /* domain id */ | ||
119 | struct intel_iommu *iommu; /* back pointer to owning iommu */ | ||
120 | |||
121 | struct list_head devices; /* all devices' list */ | ||
122 | struct iova_domain iovad; /* iova's that belong to this domain */ | ||
123 | |||
124 | struct dma_pte *pgd; /* virtual address */ | ||
125 | spinlock_t mapping_lock; /* page table lock */ | ||
126 | int gaw; /* max guest address width */ | ||
127 | |||
128 | /* adjusted guest address width, 0 is level 2 30-bit */ | ||
129 | int agaw; | ||
130 | |||
131 | #define DOMAIN_FLAG_MULTIPLE_DEVICES 1 | ||
132 | int flags; | ||
133 | }; | ||
134 | |||
135 | /* PCI domain-device relationship */ | ||
136 | struct device_domain_info { | ||
137 | struct list_head link; /* link to domain siblings */ | ||
138 | struct list_head global; /* link to global list */ | ||
139 | u8 bus; /* PCI bus number */ | ||
140 | u8 devfn; /* PCI devfn number */ | ||
141 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ | ||
142 | struct dmar_domain *domain; /* pointer to domain */ | ||
143 | }; | ||
144 | |||
145 | extern int init_dmars(void); | ||
146 | extern void free_dmar_iommu(struct intel_iommu *iommu); | ||
147 | |||
148 | extern int dmar_disabled; | ||
149 | |||
150 | #ifndef CONFIG_DMAR_GFX_WA | ||
151 | static inline void iommu_prepare_gfx_mapping(void) | ||
152 | { | ||
153 | return; | ||
154 | } | ||
155 | #endif /* !CONFIG_DMAR_GFX_WA */ | ||
156 | |||
157 | #endif | ||
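The root/context/PTE helpers in this new header are plain bit manipulation on the fixed 4K layouts documented in the comments. A short illustration of how they might compose when wiring one device function's context entry to a page-table root; purely a sketch that reuses the macros above, with the allocator and the virt_to_phys() usage assumed:

	/* Illustrative only: make a root entry point at a context table and
	 * fill one context entry for a DMA-remapping domain.  ctx_table and
	 * pgtable_phys are assumed to come from a 4K-aligned allocator. */
	static void example_wire_context(struct root_entry *root,
					 struct context_entry *ctx_table,
					 u8 devfn, int domain_id,
					 u64 pgtable_phys, int agaw)
	{
		struct context_entry *ctx = &ctx_table[devfn];

		set_root_value(root, virt_to_phys(ctx_table));
		set_root_present(root);

		context_clear_entry(*ctx);
		context_set_domain_id(*ctx, domain_id);
		context_set_address_width(*ctx, agaw);
		context_set_address_root(*ctx, pgtable_phys);
		context_set_translation_type(*ctx, CONTEXT_TT_MULTI_LEVEL);
		context_set_fault_enable(*ctx);
		context_set_present(*ctx);
	}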
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 8bf86ae2333f..bd2c01674f5e 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -19,13 +19,16 @@ | |||
19 | * Author: Shaohua Li <shaohua.li@intel.com> | 19 | * Author: Shaohua Li <shaohua.li@intel.com> |
20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | 20 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
21 | * | 21 | * |
22 | * This file implements early detection/parsing of DMA Remapping Devices | 22 | * This file implements early detection/parsing of Remapping Devices |
23 | * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI | 23 | * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI |
24 | * tables. | 24 | * tables. |
25 | * | ||
26 | * These routines are used by both DMA-remapping and Interrupt-remapping | ||
25 | */ | 27 | */ |
26 | 28 | ||
27 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
28 | #include <linux/dmar.h> | 30 | #include <linux/dmar.h> |
31 | #include <linux/timer.h> | ||
29 | #include "iova.h" | 32 | #include "iova.h" |
30 | #include "intel-iommu.h" | 33 | #include "intel-iommu.h" |
31 | 34 | ||
@@ -37,7 +40,6 @@ | |||
37 | * these units are not supported by the architecture. | 40 | * these units are not supported by the architecture. |
38 | */ | 41 | */ |
39 | LIST_HEAD(dmar_drhd_units); | 42 | LIST_HEAD(dmar_drhd_units); |
40 | LIST_HEAD(dmar_rmrr_units); | ||
41 | 43 | ||
42 | static struct acpi_table_header * __initdata dmar_tbl; | 44 | static struct acpi_table_header * __initdata dmar_tbl; |
43 | 45 | ||
@@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) | |||
53 | list_add(&drhd->list, &dmar_drhd_units); | 55 | list_add(&drhd->list, &dmar_drhd_units); |
54 | } | 56 | } |
55 | 57 | ||
56 | static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) | ||
57 | { | ||
58 | list_add(&rmrr->list, &dmar_rmrr_units); | ||
59 | } | ||
60 | |||
61 | static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, | 58 | static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, |
62 | struct pci_dev **dev, u16 segment) | 59 | struct pci_dev **dev, u16 segment) |
63 | { | 60 | { |
@@ -172,19 +169,37 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
172 | struct acpi_dmar_hardware_unit *drhd; | 169 | struct acpi_dmar_hardware_unit *drhd; |
173 | struct dmar_drhd_unit *dmaru; | 170 | struct dmar_drhd_unit *dmaru; |
174 | int ret = 0; | 171 | int ret = 0; |
175 | static int include_all; | ||
176 | 172 | ||
177 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); | 173 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); |
178 | if (!dmaru) | 174 | if (!dmaru) |
179 | return -ENOMEM; | 175 | return -ENOMEM; |
180 | 176 | ||
177 | dmaru->hdr = header; | ||
181 | drhd = (struct acpi_dmar_hardware_unit *)header; | 178 | drhd = (struct acpi_dmar_hardware_unit *)header; |
182 | dmaru->reg_base_addr = drhd->address; | 179 | dmaru->reg_base_addr = drhd->address; |
183 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ | 180 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ |
184 | 181 | ||
182 | ret = alloc_iommu(dmaru); | ||
183 | if (ret) { | ||
184 | kfree(dmaru); | ||
185 | return ret; | ||
186 | } | ||
187 | dmar_register_drhd_unit(dmaru); | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static int __init | ||
192 | dmar_parse_dev(struct dmar_drhd_unit *dmaru) | ||
193 | { | ||
194 | struct acpi_dmar_hardware_unit *drhd; | ||
195 | static int include_all; | ||
196 | int ret; | ||
197 | |||
198 | drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; | ||
199 | |||
185 | if (!dmaru->include_all) | 200 | if (!dmaru->include_all) |
186 | ret = dmar_parse_dev_scope((void *)(drhd + 1), | 201 | ret = dmar_parse_dev_scope((void *)(drhd + 1), |
187 | ((void *)drhd) + header->length, | 202 | ((void *)drhd) + drhd->header.length, |
188 | &dmaru->devices_cnt, &dmaru->devices, | 203 | &dmaru->devices_cnt, &dmaru->devices, |
189 | drhd->segment); | 204 | drhd->segment); |
190 | else { | 205 | else { |
@@ -197,37 +212,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
197 | include_all = 1; | 212 | include_all = 1; |
198 | } | 213 | } |
199 | 214 | ||
200 | if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) | 215 | if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) { |
216 | list_del(&dmaru->list); | ||
201 | kfree(dmaru); | 217 | kfree(dmaru); |
202 | else | 218 | } |
203 | dmar_register_drhd_unit(dmaru); | ||
204 | return ret; | 219 | return ret; |
205 | } | 220 | } |
206 | 221 | ||
222 | #ifdef CONFIG_DMAR | ||
223 | LIST_HEAD(dmar_rmrr_units); | ||
224 | |||
225 | static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) | ||
226 | { | ||
227 | list_add(&rmrr->list, &dmar_rmrr_units); | ||
228 | } | ||
229 | |||
230 | |||
207 | static int __init | 231 | static int __init |
208 | dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 232 | dmar_parse_one_rmrr(struct acpi_dmar_header *header) |
209 | { | 233 | { |
210 | struct acpi_dmar_reserved_memory *rmrr; | 234 | struct acpi_dmar_reserved_memory *rmrr; |
211 | struct dmar_rmrr_unit *rmrru; | 235 | struct dmar_rmrr_unit *rmrru; |
212 | int ret = 0; | ||
213 | 236 | ||
214 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); | 237 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); |
215 | if (!rmrru) | 238 | if (!rmrru) |
216 | return -ENOMEM; | 239 | return -ENOMEM; |
217 | 240 | ||
241 | rmrru->hdr = header; | ||
218 | rmrr = (struct acpi_dmar_reserved_memory *)header; | 242 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
219 | rmrru->base_address = rmrr->base_address; | 243 | rmrru->base_address = rmrr->base_address; |
220 | rmrru->end_address = rmrr->end_address; | 244 | rmrru->end_address = rmrr->end_address; |
245 | |||
246 | dmar_register_rmrr_unit(rmrru); | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static int __init | ||
251 | rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) | ||
252 | { | ||
253 | struct acpi_dmar_reserved_memory *rmrr; | ||
254 | int ret; | ||
255 | |||
256 | rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; | ||
221 | ret = dmar_parse_dev_scope((void *)(rmrr + 1), | 257 | ret = dmar_parse_dev_scope((void *)(rmrr + 1), |
222 | ((void *)rmrr) + header->length, | 258 | ((void *)rmrr) + rmrr->header.length, |
223 | &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); | 259 | &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); |
224 | 260 | ||
225 | if (ret || (rmrru->devices_cnt == 0)) | 261 | if (ret || (rmrru->devices_cnt == 0)) { |
262 | list_del(&rmrru->list); | ||
226 | kfree(rmrru); | 263 | kfree(rmrru); |
227 | else | 264 | } |
228 | dmar_register_rmrr_unit(rmrru); | ||
229 | return ret; | 265 | return ret; |
230 | } | 266 | } |
267 | #endif | ||
231 | 268 | ||
232 | static void __init | 269 | static void __init |
233 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | 270 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) |
@@ -252,6 +289,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | |||
252 | } | 289 | } |
253 | } | 290 | } |
254 | 291 | ||
292 | |||
255 | /** | 293 | /** |
256 | * parse_dmar_table - parses the DMA reporting table | 294 | * parse_dmar_table - parses the DMA reporting table |
257 | */ | 295 | */ |
@@ -284,7 +322,9 @@ parse_dmar_table(void) | |||
284 | ret = dmar_parse_one_drhd(entry_header); | 322 | ret = dmar_parse_one_drhd(entry_header); |
285 | break; | 323 | break; |
286 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | 324 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
325 | #ifdef CONFIG_DMAR | ||
287 | ret = dmar_parse_one_rmrr(entry_header); | 326 | ret = dmar_parse_one_rmrr(entry_header); |
327 | #endif | ||
288 | break; | 328 | break; |
289 | default: | 329 | default: |
290 | printk(KERN_WARNING PREFIX | 330 | printk(KERN_WARNING PREFIX |
@@ -300,15 +340,77 @@ parse_dmar_table(void) | |||
300 | return ret; | 340 | return ret; |
301 | } | 341 | } |
302 | 342 | ||
343 | int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | ||
344 | struct pci_dev *dev) | ||
345 | { | ||
346 | int index; | ||
347 | |||
348 | while (dev) { | ||
349 | for (index = 0; index < cnt; index++) | ||
350 | if (dev == devices[index]) | ||
351 | return 1; | ||
303 | 352 | ||
304 | int __init dmar_table_init(void) | 353 | /* Check our parent */ |
354 | dev = dev->bus->self; | ||
355 | } | ||
356 | |||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | struct dmar_drhd_unit * | ||
361 | dmar_find_matched_drhd_unit(struct pci_dev *dev) | ||
305 | { | 362 | { |
363 | struct dmar_drhd_unit *drhd = NULL; | ||
364 | |||
365 | list_for_each_entry(drhd, &dmar_drhd_units, list) { | ||
366 | if (drhd->include_all || dmar_pci_device_match(drhd->devices, | ||
367 | drhd->devices_cnt, dev)) | ||
368 | return drhd; | ||
369 | } | ||
370 | |||
371 | return NULL; | ||
372 | } | ||
373 | |||
374 | int __init dmar_dev_scope_init(void) | ||
375 | { | ||
376 | struct dmar_drhd_unit *drhd; | ||
377 | int ret = -ENODEV; | ||
378 | |||
379 | for_each_drhd_unit(drhd) { | ||
380 | ret = dmar_parse_dev(drhd); | ||
381 | if (ret) | ||
382 | return ret; | ||
383 | } | ||
384 | |||
385 | #ifdef CONFIG_DMAR | ||
386 | { | ||
387 | struct dmar_rmrr_unit *rmrr; | ||
388 | for_each_rmrr_units(rmrr) { | ||
389 | ret = rmrr_parse_dev(rmrr); | ||
390 | if (ret) | ||
391 | return ret; | ||
392 | } | ||
393 | } | ||
394 | #endif | ||
395 | |||
396 | return ret; | ||
397 | } | ||
306 | 398 | ||
399 | |||
400 | int __init dmar_table_init(void) | ||
401 | { | ||
402 | static int dmar_table_initialized; | ||
307 | int ret; | 403 | int ret; |
308 | 404 | ||
405 | if (dmar_table_initialized) | ||
406 | return 0; | ||
407 | |||
408 | dmar_table_initialized = 1; | ||
409 | |||
309 | ret = parse_dmar_table(); | 410 | ret = parse_dmar_table(); |
310 | if (ret) { | 411 | if (ret) { |
311 | printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); | 412 | if (ret != -ENODEV) |
413 | printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); | ||
312 | return ret; | 414 | return ret; |
313 | } | 415 | } |
314 | 416 | ||
@@ -317,9 +419,14 @@ int __init dmar_table_init(void) | |||
317 | return -ENODEV; | 419 | return -ENODEV; |
318 | } | 420 | } |
319 | 421 | ||
422 | #ifdef CONFIG_DMAR | ||
320 | if (list_empty(&dmar_rmrr_units)) | 423 | if (list_empty(&dmar_rmrr_units)) |
321 | printk(KERN_INFO PREFIX "No RMRR found\n"); | 424 | printk(KERN_INFO PREFIX "No RMRR found\n"); |
425 | #endif | ||
322 | 426 | ||
427 | #ifdef CONFIG_INTR_REMAP | ||
428 | parse_ioapics_under_ir(); | ||
429 | #endif | ||
323 | return 0; | 430 | return 0; |
324 | } | 431 | } |
325 | 432 | ||
@@ -341,3 +448,255 @@ int __init early_dmar_detect(void) | |||
341 | 448 | ||
342 | return (ACPI_SUCCESS(status) ? 1 : 0); | 449 | return (ACPI_SUCCESS(status) ? 1 : 0); |
343 | } | 450 | } |
451 | |||
452 | void __init detect_intel_iommu(void) | ||
453 | { | ||
454 | int ret; | ||
455 | |||
456 | ret = early_dmar_detect(); | ||
457 | |||
458 | #ifdef CONFIG_DMAR | ||
459 | { | ||
460 | struct acpi_table_dmar *dmar; | ||
461 | /* | ||
462 | * for now we will disable dma-remapping when interrupt | ||
463 | * remapping is enabled. | ||
464 | * When support for queued invalidation for IOTLB invalidation | ||
465 | * is added, we will not need this any more. | ||
466 | */ | ||
467 | dmar = (struct acpi_table_dmar *) dmar_tbl; | ||
468 | if (ret && cpu_has_x2apic && dmar->flags & 0x1) { | ||
469 | printk(KERN_INFO | ||
470 | "Queued invalidation will be enabled to support " | ||
471 | "x2apic and Intr-remapping.\n"); | ||
472 | printk(KERN_INFO | ||
473 | "Disabling IOMMU detection, because of missing " | ||
474 | "queued invalidation support for IOTLB " | ||
475 | "invalidation\n"); | ||
476 | printk(KERN_INFO | ||
477 | "Use \"nox2apic\", if you want to use Intel " | ||
478 | " IOMMU for DMA-remapping and don't care about " | ||
479 | " x2apic support\n"); | ||
480 | |||
481 | dmar_disabled = 1; | ||
482 | return; | ||
483 | } | ||
484 | |||
485 | if (ret && !no_iommu && !iommu_detected && !swiotlb && | ||
486 | !dmar_disabled) | ||
487 | iommu_detected = 1; | ||
488 | } | ||
489 | #endif | ||
490 | } | ||
491 | |||
492 | |||
493 | int alloc_iommu(struct dmar_drhd_unit *drhd) | ||
494 | { | ||
495 | struct intel_iommu *iommu; | ||
496 | int map_size; | ||
497 | u32 ver; | ||
498 | static int iommu_allocated = 0; | ||
499 | |||
500 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); | ||
501 | if (!iommu) | ||
502 | return -ENOMEM; | ||
503 | |||
504 | iommu->seq_id = iommu_allocated++; | ||
505 | |||
506 | iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); | ||
507 | if (!iommu->reg) { | ||
508 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
509 | goto error; | ||
510 | } | ||
511 | iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); | ||
512 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | ||
513 | |||
514 | /* the registers might be more than one page */ | ||
515 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | ||
516 | cap_max_fault_reg_offset(iommu->cap)); | ||
517 | map_size = PAGE_ALIGN_4K(map_size); | ||
518 | if (map_size > PAGE_SIZE_4K) { | ||
519 | iounmap(iommu->reg); | ||
520 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); | ||
521 | if (!iommu->reg) { | ||
522 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
523 | goto error; | ||
524 | } | ||
525 | } | ||
526 | |||
527 | ver = readl(iommu->reg + DMAR_VER_REG); | ||
528 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | ||
529 | drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | ||
530 | iommu->cap, iommu->ecap); | ||
531 | |||
532 | spin_lock_init(&iommu->register_lock); | ||
533 | |||
534 | drhd->iommu = iommu; | ||
535 | return 0; | ||
536 | error: | ||
537 | kfree(iommu); | ||
538 | return -1; | ||
539 | } | ||
540 | |||
541 | void free_iommu(struct intel_iommu *iommu) | ||
542 | { | ||
543 | if (!iommu) | ||
544 | return; | ||
545 | |||
546 | #ifdef CONFIG_DMAR | ||
547 | free_dmar_iommu(iommu); | ||
548 | #endif | ||
549 | |||
550 | if (iommu->reg) | ||
551 | iounmap(iommu->reg); | ||
552 | kfree(iommu); | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * Reclaim all the submitted descriptors which have completed their work. | ||
557 | */ | ||
558 | static inline void reclaim_free_desc(struct q_inval *qi) | ||
559 | { | ||
560 | while (qi->desc_status[qi->free_tail] == QI_DONE) { | ||
561 | qi->desc_status[qi->free_tail] = QI_FREE; | ||
562 | qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; | ||
563 | qi->free_cnt++; | ||
564 | } | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Submit the queued invalidation descriptor to the remapping | ||
569 | * hardware unit and wait for its completion. | ||
570 | */ | ||
571 | void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) | ||
572 | { | ||
573 | struct q_inval *qi = iommu->qi; | ||
574 | struct qi_desc *hw, wait_desc; | ||
575 | int wait_index, index; | ||
576 | unsigned long flags; | ||
577 | |||
578 | if (!qi) | ||
579 | return; | ||
580 | |||
581 | hw = qi->desc; | ||
582 | |||
583 | spin_lock(&qi->q_lock); | ||
584 | while (qi->free_cnt < 3) { | ||
585 | spin_unlock(&qi->q_lock); | ||
586 | cpu_relax(); | ||
587 | spin_lock(&qi->q_lock); | ||
588 | } | ||
589 | |||
590 | index = qi->free_head; | ||
591 | wait_index = (index + 1) % QI_LENGTH; | ||
592 | |||
593 | qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE; | ||
594 | |||
595 | hw[index] = *desc; | ||
596 | |||
597 | wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE; | ||
598 | wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); | ||
599 | |||
600 | hw[wait_index] = wait_desc; | ||
601 | |||
602 | __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); | ||
603 | __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); | ||
604 | |||
605 | qi->free_head = (qi->free_head + 2) % QI_LENGTH; | ||
606 | qi->free_cnt -= 2; | ||
607 | |||
608 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
609 | /* | ||
610 | * update the HW tail register indicating the presence of | ||
611 | * new descriptors. | ||
612 | */ | ||
613 | writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); | ||
614 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
615 | |||
616 | while (qi->desc_status[wait_index] != QI_DONE) { | ||
617 | spin_unlock(&qi->q_lock); | ||
618 | cpu_relax(); | ||
619 | spin_lock(&qi->q_lock); | ||
620 | } | ||
621 | |||
622 | qi->desc_status[index] = QI_DONE; | ||
623 | |||
624 | reclaim_free_desc(qi); | ||
625 | spin_unlock(&qi->q_lock); | ||
626 | } | ||
627 | |||
628 | /* | ||
629 | * Flush the global interrupt entry cache. | ||
630 | */ | ||
631 | void qi_global_iec(struct intel_iommu *iommu) | ||
632 | { | ||
633 | struct qi_desc desc; | ||
634 | |||
635 | desc.low = QI_IEC_TYPE; | ||
636 | desc.high = 0; | ||
637 | |||
638 | qi_submit_sync(&desc, iommu); | ||
639 | } | ||
640 | |||
641 | /* | ||
642 | * Enable the Queued Invalidation interface. This is required to support | ||
643 | * interrupt-remapping; it is also used by DMA-remapping, where it replaces | ||
644 | * register-based IOTLB invalidation. | ||
645 | */ | ||
646 | int dmar_enable_qi(struct intel_iommu *iommu) | ||
647 | { | ||
648 | u32 cmd, sts; | ||
649 | unsigned long flags; | ||
650 | struct q_inval *qi; | ||
651 | |||
652 | if (!ecap_qis(iommu->ecap)) | ||
653 | return -ENOENT; | ||
654 | |||
655 | /* | ||
656 | * queued invalidation is already set up and enabled. | ||
657 | */ | ||
658 | if (iommu->qi) | ||
659 | return 0; | ||
660 | |||
661 | iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL); | ||
662 | if (!iommu->qi) | ||
663 | return -ENOMEM; | ||
664 | |||
665 | qi = iommu->qi; | ||
666 | |||
667 | qi->desc = (void *)(get_zeroed_page(GFP_KERNEL)); | ||
668 | if (!qi->desc) { | ||
669 | kfree(qi); | ||
670 | iommu->qi = 0; | ||
671 | return -ENOMEM; | ||
672 | } | ||
673 | |||
674 | qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL); | ||
675 | if (!qi->desc_status) { | ||
676 | free_page((unsigned long) qi->desc); | ||
677 | kfree(qi); | ||
678 | iommu->qi = 0; | ||
679 | return -ENOMEM; | ||
680 | } | ||
681 | |||
682 | qi->free_head = qi->free_tail = 0; | ||
683 | qi->free_cnt = QI_LENGTH; | ||
684 | |||
685 | spin_lock_init(&qi->q_lock); | ||
686 | |||
687 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
688 | /* write zero to the tail reg */ | ||
689 | writel(0, iommu->reg + DMAR_IQT_REG); | ||
690 | |||
691 | dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); | ||
692 | |||
693 | cmd = iommu->gcmd | DMA_GCMD_QIE; | ||
694 | iommu->gcmd |= DMA_GCMD_QIE; | ||
695 | writel(cmd, iommu->reg + DMAR_GCMD_REG); | ||
696 | |||
698 | /* Make sure hardware completes it */ | ||
698 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); | ||
699 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
700 | |||
701 | return 0; | ||
702 | } | ||
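The queued-invalidation path added to dmar.c above keeps its ring state in software: qi_submit_sync() takes two consecutive slots (the caller's descriptor plus an invalidation-wait descriptor), advances free_head by two, and reclaim_free_desc() later walks free_tail across every QI_DONE slot to hand it back. The standalone sketch below (plain userspace C, not part of the patch; "hardware" completion is faked by writing the status directly) models only that free_head/free_tail/free_cnt bookkeeping:

#include <stdio.h>

#define QI_LENGTH 256
enum { QI_FREE, QI_IN_USE, QI_DONE };

struct qi_model {
	int desc_status[QI_LENGTH];
	int free_head;	/* next slot to hand out */
	int free_tail;	/* oldest slot not yet reclaimed */
	int free_cnt;
};

/* Return completed slots to the pool, like reclaim_free_desc(). */
static void reclaim(struct qi_model *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

/*
 * Take one descriptor slot plus its wait-descriptor slot.  The real driver
 * spins until hardware marks the wait slot QI_DONE; the model marks both
 * slots done immediately so it stays runnable.
 */
static int submit(struct qi_model *qi)
{
	int index, wait_index;

	if (qi->free_cnt < 3)
		return -1;

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;
	qi->desc_status[index] = QI_IN_USE;
	qi->desc_status[wait_index] = QI_IN_USE;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	qi->desc_status[wait_index] = QI_DONE;	/* "hardware" completion */
	qi->desc_status[index] = QI_DONE;
	reclaim(qi);
	return 0;
}

int main(void)
{
	struct qi_model qi = { .free_cnt = QI_LENGTH };
	int i;

	for (i = 0; i < 1000; i++)
		if (submit(&qi))
			return 1;
	printf("free_cnt after 1000 submissions: %d\n", qi.free_cnt);
	return 0;
}

The free_cnt < 3 condition mirrors the driver's spin-wait: after taking a pair of slots at least one entry stays free, presumably so a completely full ring can never look identical to an empty one.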
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 8d0e60ac849c..389fdd6f4a9f 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -49,8 +49,6 @@ | |||
49 | 49 | ||
50 | #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 | 50 | #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 |
51 | 51 | ||
52 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ | ||
53 | |||
54 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) | 52 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) |
55 | 53 | ||
56 | 54 | ||
@@ -58,8 +56,6 @@ static void flush_unmaps_timeout(unsigned long data); | |||
58 | 56 | ||
59 | DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); | 57 | DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); |
60 | 58 | ||
61 | static struct intel_iommu *g_iommus; | ||
62 | |||
63 | #define HIGH_WATER_MARK 250 | 59 | #define HIGH_WATER_MARK 250 |
64 | struct deferred_flush_tables { | 60 | struct deferred_flush_tables { |
65 | int next; | 61 | int next; |
@@ -80,7 +76,7 @@ static long list_size; | |||
80 | 76 | ||
81 | static void domain_remove_dev_info(struct dmar_domain *domain); | 77 | static void domain_remove_dev_info(struct dmar_domain *domain); |
82 | 78 | ||
83 | static int dmar_disabled; | 79 | int dmar_disabled; |
84 | static int __initdata dmar_map_gfx = 1; | 80 | static int __initdata dmar_map_gfx = 1; |
85 | static int dmar_forcedac; | 81 | static int dmar_forcedac; |
86 | static int intel_iommu_strict; | 82 | static int intel_iommu_strict; |
@@ -185,13 +181,6 @@ void free_iova_mem(struct iova *iova) | |||
185 | kmem_cache_free(iommu_iova_cache, iova); | 181 | kmem_cache_free(iommu_iova_cache, iova); |
186 | } | 182 | } |
187 | 183 | ||
188 | static inline void __iommu_flush_cache( | ||
189 | struct intel_iommu *iommu, void *addr, int size) | ||
190 | { | ||
191 | if (!ecap_coherent(iommu->ecap)) | ||
192 | clflush_cache_range(addr, size); | ||
193 | } | ||
194 | |||
195 | /* Gets context entry for a given bus and devfn */ | 184 | /* Gets context entry for a given bus and devfn */ |
196 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | 185 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, |
197 | u8 bus, u8 devfn) | 186 | u8 bus, u8 devfn) |
@@ -488,19 +477,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
488 | return 0; | 477 | return 0; |
489 | } | 478 | } |
490 | 479 | ||
491 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | ||
492 | {\ | ||
493 | cycles_t start_time = get_cycles();\ | ||
494 | while (1) {\ | ||
495 | sts = op (iommu->reg + offset);\ | ||
496 | if (cond)\ | ||
497 | break;\ | ||
498 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ | ||
499 | panic("DMAR hardware is malfunctioning\n");\ | ||
500 | cpu_relax();\ | ||
501 | }\ | ||
502 | } | ||
503 | |||
504 | static void iommu_set_root_entry(struct intel_iommu *iommu) | 480 | static void iommu_set_root_entry(struct intel_iommu *iommu) |
505 | { | 481 | { |
506 | void *addr; | 482 | void *addr; |
@@ -990,6 +966,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
990 | return -ENOMEM; | 966 | return -ENOMEM; |
991 | } | 967 | } |
992 | 968 | ||
969 | spin_lock_init(&iommu->lock); | ||
970 | |||
993 | /* | 971 | /* |
994 | * if Caching mode is set, then invalid translations are tagged | 972 | * if Caching mode is set, then invalid translations are tagged |
995 | * with domainid 0. Hence we need to pre-allocate it. | 973 | * with domainid 0. Hence we need to pre-allocate it. |
@@ -998,62 +976,15 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
998 | set_bit(0, iommu->domain_ids); | 976 | set_bit(0, iommu->domain_ids); |
999 | return 0; | 977 | return 0; |
1000 | } | 978 | } |
1001 | static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, | ||
1002 | struct dmar_drhd_unit *drhd) | ||
1003 | { | ||
1004 | int ret; | ||
1005 | int map_size; | ||
1006 | u32 ver; | ||
1007 | |||
1008 | iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); | ||
1009 | if (!iommu->reg) { | ||
1010 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
1011 | goto error; | ||
1012 | } | ||
1013 | iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); | ||
1014 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | ||
1015 | |||
1016 | /* the registers might be more than one page */ | ||
1017 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | ||
1018 | cap_max_fault_reg_offset(iommu->cap)); | ||
1019 | map_size = PAGE_ALIGN_4K(map_size); | ||
1020 | if (map_size > PAGE_SIZE_4K) { | ||
1021 | iounmap(iommu->reg); | ||
1022 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); | ||
1023 | if (!iommu->reg) { | ||
1024 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
1025 | goto error; | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | ver = readl(iommu->reg + DMAR_VER_REG); | ||
1030 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | ||
1031 | drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | ||
1032 | iommu->cap, iommu->ecap); | ||
1033 | ret = iommu_init_domains(iommu); | ||
1034 | if (ret) | ||
1035 | goto error_unmap; | ||
1036 | spin_lock_init(&iommu->lock); | ||
1037 | spin_lock_init(&iommu->register_lock); | ||
1038 | 979 | ||
1039 | drhd->iommu = iommu; | ||
1040 | return iommu; | ||
1041 | error_unmap: | ||
1042 | iounmap(iommu->reg); | ||
1043 | error: | ||
1044 | kfree(iommu); | ||
1045 | return NULL; | ||
1046 | } | ||
1047 | 980 | ||
1048 | static void domain_exit(struct dmar_domain *domain); | 981 | static void domain_exit(struct dmar_domain *domain); |
1049 | static void free_iommu(struct intel_iommu *iommu) | 982 | |
983 | void free_dmar_iommu(struct intel_iommu *iommu) | ||
1050 | { | 984 | { |
1051 | struct dmar_domain *domain; | 985 | struct dmar_domain *domain; |
1052 | int i; | 986 | int i; |
1053 | 987 | ||
1054 | if (!iommu) | ||
1055 | return; | ||
1056 | |||
1057 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); | 988 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); |
1058 | for (; i < cap_ndoms(iommu->cap); ) { | 989 | for (; i < cap_ndoms(iommu->cap); ) { |
1059 | domain = iommu->domains[i]; | 990 | domain = iommu->domains[i]; |
@@ -1078,10 +1009,6 @@ static void free_iommu(struct intel_iommu *iommu) | |||
1078 | 1009 | ||
1079 | /* free context mapping */ | 1010 | /* free context mapping */ |
1080 | free_context_table(iommu); | 1011 | free_context_table(iommu); |
1081 | |||
1082 | if (iommu->reg) | ||
1083 | iounmap(iommu->reg); | ||
1084 | kfree(iommu); | ||
1085 | } | 1012 | } |
1086 | 1013 | ||
1087 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | 1014 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) |
@@ -1426,37 +1353,6 @@ find_domain(struct pci_dev *pdev) | |||
1426 | return NULL; | 1353 | return NULL; |
1427 | } | 1354 | } |
1428 | 1355 | ||
1429 | static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | ||
1430 | struct pci_dev *dev) | ||
1431 | { | ||
1432 | int index; | ||
1433 | |||
1434 | while (dev) { | ||
1435 | for (index = 0; index < cnt; index++) | ||
1436 | if (dev == devices[index]) | ||
1437 | return 1; | ||
1438 | |||
1439 | /* Check our parent */ | ||
1440 | dev = dev->bus->self; | ||
1441 | } | ||
1442 | |||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | static struct dmar_drhd_unit * | ||
1447 | dmar_find_matched_drhd_unit(struct pci_dev *dev) | ||
1448 | { | ||
1449 | struct dmar_drhd_unit *drhd = NULL; | ||
1450 | |||
1451 | list_for_each_entry(drhd, &dmar_drhd_units, list) { | ||
1452 | if (drhd->include_all || dmar_pci_device_match(drhd->devices, | ||
1453 | drhd->devices_cnt, dev)) | ||
1454 | return drhd; | ||
1455 | } | ||
1456 | |||
1457 | return NULL; | ||
1458 | } | ||
1459 | |||
1460 | /* domain is initialized */ | 1356 | /* domain is initialized */ |
1461 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | 1357 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) |
1462 | { | 1358 | { |
@@ -1729,8 +1625,6 @@ int __init init_dmars(void) | |||
1729 | * endfor | 1625 | * endfor |
1730 | */ | 1626 | */ |
1731 | for_each_drhd_unit(drhd) { | 1627 | for_each_drhd_unit(drhd) { |
1732 | if (drhd->ignored) | ||
1733 | continue; | ||
1734 | g_num_of_iommus++; | 1628 | g_num_of_iommus++; |
1735 | /* | 1629 | /* |
1736 | * lock not needed as this is only incremented in the single | 1630 | * lock not needed as this is only incremented in the single |
@@ -1739,12 +1633,6 @@ int __init init_dmars(void) | |||
1739 | */ | 1633 | */ |
1740 | } | 1634 | } |
1741 | 1635 | ||
1742 | g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL); | ||
1743 | if (!g_iommus) { | ||
1744 | ret = -ENOMEM; | ||
1745 | goto error; | ||
1746 | } | ||
1747 | |||
1748 | deferred_flush = kzalloc(g_num_of_iommus * | 1636 | deferred_flush = kzalloc(g_num_of_iommus * |
1749 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 1637 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
1750 | if (!deferred_flush) { | 1638 | if (!deferred_flush) { |
@@ -1752,16 +1640,15 @@ int __init init_dmars(void) | |||
1752 | goto error; | 1640 | goto error; |
1753 | } | 1641 | } |
1754 | 1642 | ||
1755 | i = 0; | ||
1756 | for_each_drhd_unit(drhd) { | 1643 | for_each_drhd_unit(drhd) { |
1757 | if (drhd->ignored) | 1644 | if (drhd->ignored) |
1758 | continue; | 1645 | continue; |
1759 | iommu = alloc_iommu(&g_iommus[i], drhd); | 1646 | |
1760 | i++; | 1647 | iommu = drhd->iommu; |
1761 | if (!iommu) { | 1648 | |
1762 | ret = -ENOMEM; | 1649 | ret = iommu_init_domains(iommu); |
1650 | if (ret) | ||
1763 | goto error; | 1651 | goto error; |
1764 | } | ||
1765 | 1652 | ||
1766 | /* | 1653 | /* |
1767 | * TBD: | 1654 | * TBD: |
@@ -1845,7 +1732,6 @@ error: | |||
1845 | iommu = drhd->iommu; | 1732 | iommu = drhd->iommu; |
1846 | free_iommu(iommu); | 1733 | free_iommu(iommu); |
1847 | } | 1734 | } |
1848 | kfree(g_iommus); | ||
1849 | return ret; | 1735 | return ret; |
1850 | } | 1736 | } |
1851 | 1737 | ||
@@ -2002,7 +1888,10 @@ static void flush_unmaps(void) | |||
2002 | /* just flush them all */ | 1888 | /* just flush them all */ |
2003 | for (i = 0; i < g_num_of_iommus; i++) { | 1889 | for (i = 0; i < g_num_of_iommus; i++) { |
2004 | if (deferred_flush[i].next) { | 1890 | if (deferred_flush[i].next) { |
2005 | iommu_flush_iotlb_global(&g_iommus[i], 0); | 1891 | struct intel_iommu *iommu = |
1892 | deferred_flush[i].domain[0]->iommu; | ||
1893 | |||
1894 | iommu_flush_iotlb_global(iommu, 0); | ||
2006 | for (j = 0; j < deferred_flush[i].next; j++) { | 1895 | for (j = 0; j < deferred_flush[i].next; j++) { |
2007 | __free_iova(&deferred_flush[i].domain[j]->iovad, | 1896 | __free_iova(&deferred_flush[i].domain[j]->iovad, |
2008 | deferred_flush[i].iova[j]); | 1897 | deferred_flush[i].iova[j]); |
@@ -2032,7 +1921,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
2032 | if (list_size == HIGH_WATER_MARK) | 1921 | if (list_size == HIGH_WATER_MARK) |
2033 | flush_unmaps(); | 1922 | flush_unmaps(); |
2034 | 1923 | ||
2035 | iommu_id = dom->iommu - g_iommus; | 1924 | iommu_id = dom->iommu->seq_id; |
1925 | |||
2036 | next = deferred_flush[iommu_id].next; | 1926 | next = deferred_flush[iommu_id].next; |
2037 | deferred_flush[iommu_id].domain[next] = dom; | 1927 | deferred_flush[iommu_id].domain[next] = dom; |
2038 | deferred_flush[iommu_id].iova[next] = iova; | 1928 | deferred_flush[iommu_id].iova[next] = iova; |
@@ -2348,15 +2238,6 @@ static void __init iommu_exit_mempool(void) | |||
2348 | 2238 | ||
2349 | } | 2239 | } |
2350 | 2240 | ||
2351 | void __init detect_intel_iommu(void) | ||
2352 | { | ||
2353 | if (swiotlb || no_iommu || iommu_detected || dmar_disabled) | ||
2354 | return; | ||
2355 | if (early_dmar_detect()) { | ||
2356 | iommu_detected = 1; | ||
2357 | } | ||
2358 | } | ||
2359 | |||
2360 | static void __init init_no_remapping_devices(void) | 2241 | static void __init init_no_remapping_devices(void) |
2361 | { | 2242 | { |
2362 | struct dmar_drhd_unit *drhd; | 2243 | struct dmar_drhd_unit *drhd; |
@@ -2403,12 +2284,19 @@ int __init intel_iommu_init(void) | |||
2403 | { | 2284 | { |
2404 | int ret = 0; | 2285 | int ret = 0; |
2405 | 2286 | ||
2406 | if (no_iommu || swiotlb || dmar_disabled) | ||
2407 | return -ENODEV; | ||
2408 | |||
2409 | if (dmar_table_init()) | 2287 | if (dmar_table_init()) |
2410 | return -ENODEV; | 2288 | return -ENODEV; |
2411 | 2289 | ||
2290 | if (dmar_dev_scope_init()) | ||
2291 | return -ENODEV; | ||
2292 | |||
2293 | /* | ||
2294 | * Check the need for DMA-remapping initialization now. | ||
2295 | * Above initialization will also be used by Interrupt-remapping. | ||
2296 | */ | ||
2297 | if (no_iommu || swiotlb || dmar_disabled) | ||
2298 | return -ENODEV; | ||
2299 | |||
2412 | iommu_init_mempool(); | 2300 | iommu_init_mempool(); |
2413 | dmar_init_reserved_ranges(); | 2301 | dmar_init_reserved_ranges(); |
2414 | 2302 | ||
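One consequence of moving IOMMU allocation into dmar.c is visible in the deferred-flush hunks above: intel_iommu structures are no longer carved out of a single g_iommus array, so the old pointer-arithmetic index (dom->iommu - g_iommus) is replaced by a seq_id assigned at allocation time. A minimal userspace sketch of that pattern (all names below are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

#define MAX_UNITS 16

struct unit {
	int seq_id;
	/* ... per-unit hardware state would live here ... */
};

static struct unit *units[MAX_UNITS];
static int nr_units;

/*
 * Units are allocated one at a time, so "pointer minus array base" no
 * longer yields an index; a sequence id assigned at allocation does.
 */
static struct unit *alloc_unit(void)
{
	struct unit *u;

	if (nr_units == MAX_UNITS)
		return NULL;
	u = calloc(1, sizeof(*u));
	if (!u)
		return NULL;
	u->seq_id = nr_units;
	units[nr_units++] = u;
	return u;
}

int main(void)
{
	struct unit *a = alloc_unit();
	struct unit *b = alloc_unit();

	if (!a || !b)
		return 1;
	/* side tables (e.g. deferred-flush state) stay indexed by seq_id */
	printf("a->seq_id=%d b->seq_id=%d\n", a->seq_id, b->seq_id);
	return 0;
}

Any per-unit side table, such as the deferred_flush array, can then remain index-based without requiring the units themselves to sit contiguously in memory.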
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h index afc0ad96122e..2142c01e0143 100644 --- a/drivers/pci/intel-iommu.h +++ b/drivers/pci/intel-iommu.h | |||
@@ -27,19 +27,8 @@ | |||
27 | #include <linux/sysdev.h> | 27 | #include <linux/sysdev.h> |
28 | #include "iova.h" | 28 | #include "iova.h" |
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | 30 | #include <asm/cacheflush.h> | |
31 | /* | 31 | #include "dma_remapping.h" |
32 | * We need a fixed PAGE_SIZE of 4K irrespective of | ||
33 | * arch PAGE_SIZE for IOMMU page tables. | ||
34 | */ | ||
35 | #define PAGE_SHIFT_4K (12) | ||
36 | #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) | ||
37 | #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) | ||
38 | #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) | ||
39 | |||
40 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) | ||
41 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) | ||
42 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) | ||
43 | 32 | ||
44 | /* | 33 | /* |
45 | * Intel IOMMU register specification per version 1.0 public spec. | 34 | * Intel IOMMU register specification per version 1.0 public spec. |
@@ -63,6 +52,11 @@ | |||
63 | #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ | 52 | #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ |
64 | #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ | 53 | #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ |
65 | #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ | 54 | #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ |
55 | #define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ | ||
56 | #define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ | ||
57 | #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ | ||
58 | #define DMAR_ICS_REG 0x98 /* Invalidation complete status register */ | ||
59 | #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ | ||
66 | 60 | ||
67 | #define OFFSET_STRIDE (9) | 61 | #define OFFSET_STRIDE (9) |
68 | /* | 62 | /* |
@@ -126,6 +120,10 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
126 | #define ecap_max_iotlb_offset(e) \ | 120 | #define ecap_max_iotlb_offset(e) \ |
127 | (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) | 121 | (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) |
128 | #define ecap_coherent(e) ((e) & 0x1) | 122 | #define ecap_coherent(e) ((e) & 0x1) |
123 | #define ecap_qis(e) ((e) & 0x2) | ||
124 | #define ecap_eim_support(e) ((e >> 4) & 0x1) | ||
125 | #define ecap_ir_support(e) ((e >> 3) & 0x1) | ||
126 | #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) | ||
129 | 127 | ||
130 | 128 | ||
131 | /* IOTLB_REG */ | 129 | /* IOTLB_REG */ |
@@ -141,6 +139,17 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
141 | #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) | 139 | #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) |
142 | #define DMA_TLB_MAX_SIZE (0x3f) | 140 | #define DMA_TLB_MAX_SIZE (0x3f) |
143 | 141 | ||
142 | /* INVALID_DESC */ | ||
143 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) | ||
144 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) | ||
145 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) | ||
146 | #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) | ||
147 | #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) | ||
148 | #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) | ||
149 | #define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6) | ||
150 | #define DMA_ID_TLB_ADDR(addr) (addr) | ||
151 | #define DMA_ID_TLB_ADDR_MASK(mask) (mask) | ||
152 | |||
144 | /* PMEN_REG */ | 153 | /* PMEN_REG */ |
145 | #define DMA_PMEN_EPM (((u32)1)<<31) | 154 | #define DMA_PMEN_EPM (((u32)1)<<31) |
146 | #define DMA_PMEN_PRS (((u32)1)<<0) | 155 | #define DMA_PMEN_PRS (((u32)1)<<0) |
@@ -151,6 +160,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
151 | #define DMA_GCMD_SFL (((u32)1) << 29) | 160 | #define DMA_GCMD_SFL (((u32)1) << 29) |
152 | #define DMA_GCMD_EAFL (((u32)1) << 28) | 161 | #define DMA_GCMD_EAFL (((u32)1) << 28) |
153 | #define DMA_GCMD_WBF (((u32)1) << 27) | 162 | #define DMA_GCMD_WBF (((u32)1) << 27) |
163 | #define DMA_GCMD_QIE (((u32)1) << 26) | ||
164 | #define DMA_GCMD_SIRTP (((u32)1) << 24) | ||
165 | #define DMA_GCMD_IRE (((u32) 1) << 25) | ||
154 | 166 | ||
155 | /* GSTS_REG */ | 167 | /* GSTS_REG */ |
156 | #define DMA_GSTS_TES (((u32)1) << 31) | 168 | #define DMA_GSTS_TES (((u32)1) << 31) |
@@ -158,6 +170,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
158 | #define DMA_GSTS_FLS (((u32)1) << 29) | 170 | #define DMA_GSTS_FLS (((u32)1) << 29) |
159 | #define DMA_GSTS_AFLS (((u32)1) << 28) | 171 | #define DMA_GSTS_AFLS (((u32)1) << 28) |
160 | #define DMA_GSTS_WBFS (((u32)1) << 27) | 172 | #define DMA_GSTS_WBFS (((u32)1) << 27) |
173 | #define DMA_GSTS_QIES (((u32)1) << 26) | ||
174 | #define DMA_GSTS_IRTPS (((u32)1) << 24) | ||
175 | #define DMA_GSTS_IRES (((u32)1) << 25) | ||
161 | 176 | ||
162 | /* CCMD_REG */ | 177 | /* CCMD_REG */ |
163 | #define DMA_CCMD_ICC (((u64)1) << 63) | 178 | #define DMA_CCMD_ICC (((u64)1) << 63) |
@@ -187,158 +202,106 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
187 | #define dma_frcd_source_id(c) (c & 0xffff) | 202 | #define dma_frcd_source_id(c) (c & 0xffff) |
188 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ | 203 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ |
189 | 204 | ||
190 | /* | 205 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ |
191 | * 0: Present | 206 | |
192 | * 1-11: Reserved | 207 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ |
193 | * 12-63: Context Ptr (12 - (haw-1)) | 208 | {\ |
194 | * 64-127: Reserved | 209 | cycles_t start_time = get_cycles();\ |
195 | */ | 210 | while (1) {\ |
196 | struct root_entry { | 211 | sts = op (iommu->reg + offset);\ |
197 | u64 val; | 212 | if (cond)\ |
198 | u64 rsvd1; | 213 | break;\ |
199 | }; | 214 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ |
200 | #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) | 215 | panic("DMAR hardware is malfunctioning\n");\ |
201 | static inline bool root_present(struct root_entry *root) | 216 | cpu_relax();\ |
202 | { | 217 | }\ |
203 | return (root->val & 1); | ||
204 | } | ||
205 | static inline void set_root_present(struct root_entry *root) | ||
206 | { | ||
207 | root->val |= 1; | ||
208 | } | ||
209 | static inline void set_root_value(struct root_entry *root, unsigned long value) | ||
210 | { | ||
211 | root->val |= value & PAGE_MASK_4K; | ||
212 | } | 218 | } |
213 | 219 | ||
214 | struct context_entry; | 220 | #define QI_LENGTH 256 /* queue length */ |
215 | static inline struct context_entry * | ||
216 | get_context_addr_from_root(struct root_entry *root) | ||
217 | { | ||
218 | return (struct context_entry *) | ||
219 | (root_present(root)?phys_to_virt( | ||
220 | root->val & PAGE_MASK_4K): | ||
221 | NULL); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * low 64 bits: | ||
226 | * 0: present | ||
227 | * 1: fault processing disable | ||
228 | * 2-3: translation type | ||
229 | * 12-63: address space root | ||
230 | * high 64 bits: | ||
231 | * 0-2: address width | ||
232 | * 3-6: aval | ||
233 | * 8-23: domain id | ||
234 | */ | ||
235 | struct context_entry { | ||
236 | u64 lo; | ||
237 | u64 hi; | ||
238 | }; | ||
239 | #define context_present(c) ((c).lo & 1) | ||
240 | #define context_fault_disable(c) (((c).lo >> 1) & 1) | ||
241 | #define context_translation_type(c) (((c).lo >> 2) & 3) | ||
242 | #define context_address_root(c) ((c).lo & PAGE_MASK_4K) | ||
243 | #define context_address_width(c) ((c).hi & 7) | ||
244 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) | ||
245 | |||
246 | #define context_set_present(c) do {(c).lo |= 1;} while (0) | ||
247 | #define context_set_fault_enable(c) \ | ||
248 | do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) | ||
249 | #define context_set_translation_type(c, val) \ | ||
250 | do { \ | ||
251 | (c).lo &= (((u64)-1) << 4) | 3; \ | ||
252 | (c).lo |= ((val) & 3) << 2; \ | ||
253 | } while (0) | ||
254 | #define CONTEXT_TT_MULTI_LEVEL 0 | ||
255 | #define context_set_address_root(c, val) \ | ||
256 | do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) | ||
257 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) | ||
258 | #define context_set_domain_id(c, val) \ | ||
259 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) | ||
260 | #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) | ||
261 | 221 | ||
262 | /* | 222 | enum { |
263 | * 0: readable | 223 | QI_FREE, |
264 | * 1: writable | 224 | QI_IN_USE, |
265 | * 2-6: reserved | 225 | QI_DONE |
266 | * 7: super page | ||
267 | * 8-11: available | ||
268 | * 12-63: Host physcial address | ||
269 | */ | ||
270 | struct dma_pte { | ||
271 | u64 val; | ||
272 | }; | 226 | }; |
273 | #define dma_clear_pte(p) do {(p).val = 0;} while (0) | ||
274 | |||
275 | #define DMA_PTE_READ (1) | ||
276 | #define DMA_PTE_WRITE (2) | ||
277 | 227 | ||
278 | #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) | 228 | #define QI_CC_TYPE 0x1 |
279 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) | 229 | #define QI_IOTLB_TYPE 0x2 |
280 | #define dma_set_pte_prot(p, prot) \ | 230 | #define QI_DIOTLB_TYPE 0x3 |
281 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) | 231 | #define QI_IEC_TYPE 0x4 |
282 | #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) | 232 | #define QI_IWD_TYPE 0x5 |
283 | #define dma_set_pte_addr(p, addr) do {\ | ||
284 | (p).val |= ((addr) & PAGE_MASK_4K); } while (0) | ||
285 | #define dma_pte_present(p) (((p).val & 3) != 0) | ||
286 | 233 | ||
287 | struct intel_iommu; | 234 | #define QI_IEC_SELECTIVE (((u64)1) << 4) |
235 | #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) | ||
236 | #define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27)) | ||
288 | 237 | ||
289 | struct dmar_domain { | 238 | #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) |
290 | int id; /* domain id */ | 239 | #define QI_IWD_STATUS_WRITE (((u64)1) << 5) |
291 | struct intel_iommu *iommu; /* back pointer to owning iommu */ | ||
292 | 240 | ||
293 | struct list_head devices; /* all devices' list */ | 241 | struct qi_desc { |
294 | struct iova_domain iovad; /* iova's that belong to this domain */ | 242 | u64 low, high; |
243 | }; | ||
295 | 244 | ||
296 | struct dma_pte *pgd; /* virtual address */ | 245 | struct q_inval { |
297 | spinlock_t mapping_lock; /* page table lock */ | 246 | spinlock_t q_lock; |
298 | int gaw; /* max guest address width */ | 247 | struct qi_desc *desc; /* invalidation queue */ |
248 | int *desc_status; /* desc status */ | ||
249 | int free_head; /* first free entry */ | ||
250 | int free_tail; /* last free entry */ | ||
251 | int free_cnt; | ||
252 | }; | ||
299 | 253 | ||
300 | /* adjusted guest address width, 0 is level 2 30-bit */ | 254 | #ifdef CONFIG_INTR_REMAP |
301 | int agaw; | 255 | /* 1MB - maximum possible interrupt remapping table size */ |
256 | #define INTR_REMAP_PAGE_ORDER 8 | ||
257 | #define INTR_REMAP_TABLE_REG_SIZE 0xf | ||
302 | 258 | ||
303 | #define DOMAIN_FLAG_MULTIPLE_DEVICES 1 | 259 | #define INTR_REMAP_TABLE_ENTRIES 65536 |
304 | int flags; | ||
305 | }; | ||
306 | 260 | ||
307 | /* PCI domain-device relationship */ | 261 | struct ir_table { |
308 | struct device_domain_info { | 262 | struct irte *base; |
309 | struct list_head link; /* link to domain siblings */ | ||
310 | struct list_head global; /* link to global list */ | ||
311 | u8 bus; /* PCI bus numer */ | ||
312 | u8 devfn; /* PCI devfn number */ | ||
313 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ | ||
314 | struct dmar_domain *domain; /* pointer to domain */ | ||
315 | }; | 263 | }; |
316 | 264 | #endif | |
317 | extern int init_dmars(void); | ||
318 | 265 | ||
319 | struct intel_iommu { | 266 | struct intel_iommu { |
320 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 267 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
321 | u64 cap; | 268 | u64 cap; |
322 | u64 ecap; | 269 | u64 ecap; |
323 | unsigned long *domain_ids; /* bitmap of domains */ | ||
324 | struct dmar_domain **domains; /* ptr to domains */ | ||
325 | int seg; | 270 | int seg; |
326 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ | 271 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ |
327 | spinlock_t lock; /* protect context, domain ids */ | ||
328 | spinlock_t register_lock; /* protect register handling */ | 272 | spinlock_t register_lock; /* protect register handling */ |
273 | int seq_id; /* sequence id of the iommu */ | ||
274 | |||
275 | #ifdef CONFIG_DMAR | ||
276 | unsigned long *domain_ids; /* bitmap of domains */ | ||
277 | struct dmar_domain **domains; /* ptr to domains */ | ||
278 | spinlock_t lock; /* protect context, domain ids */ | ||
329 | struct root_entry *root_entry; /* virtual address */ | 279 | struct root_entry *root_entry; /* virtual address */ |
330 | 280 | ||
331 | unsigned int irq; | 281 | unsigned int irq; |
332 | unsigned char name[7]; /* Device Name */ | 282 | unsigned char name[7]; /* Device Name */ |
333 | struct msi_msg saved_msg; | 283 | struct msi_msg saved_msg; |
334 | struct sys_device sysdev; | 284 | struct sys_device sysdev; |
285 | #endif | ||
286 | struct q_inval *qi; /* Queued invalidation info */ | ||
287 | #ifdef CONFIG_INTR_REMAP | ||
288 | struct ir_table *ir_table; /* Interrupt remapping info */ | ||
289 | #endif | ||
335 | }; | 290 | }; |
336 | 291 | ||
337 | #ifndef CONFIG_DMAR_GFX_WA | 292 | static inline void __iommu_flush_cache( |
338 | static inline void iommu_prepare_gfx_mapping(void) | 293 | struct intel_iommu *iommu, void *addr, int size) |
339 | { | 294 | { |
340 | return; | 295 | if (!ecap_coherent(iommu->ecap)) |
296 | clflush_cache_range(addr, size); | ||
341 | } | 297 | } |
342 | #endif /* !CONFIG_DMAR_GFX_WA */ | ||
343 | 298 | ||
299 | extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); | ||
300 | |||
301 | extern int alloc_iommu(struct dmar_drhd_unit *drhd); | ||
302 | extern void free_iommu(struct intel_iommu *iommu); | ||
303 | extern int dmar_enable_qi(struct intel_iommu *iommu); | ||
304 | extern void qi_global_iec(struct intel_iommu *iommu); | ||
305 | |||
306 | extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | ||
344 | #endif | 307 | #endif |
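The new QI_IWD_* macros above encode the invalidation-wait descriptor that qi_submit_sync() appends after every real descriptor: type 0x5 in the low bits, a status-write flag at bit 5, and the status value to be written placed at bit 32 and up. A small standalone check of that bit layout (userspace C, reusing only the macro definitions shown in the header hunk):

#include <stdio.h>
#include <stdint.h>

#define QI_IWD_TYPE		0x5
#define QI_IWD_STATUS_WRITE	(((uint64_t)1) << 5)
#define QI_IWD_STATUS_DATA(d)	(((uint64_t)(d)) << 32)

int main(void)
{
	/*
	 * Same composition qi_submit_sync() uses for its wait descriptor:
	 * invalidation-wait type, "write status" flag, status value 2.
	 */
	uint64_t low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;

	printf("wait descriptor low qword: 0x%llx\n",
	       (unsigned long long)low);	/* prints 0x200000025 */
	return 0;
}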
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c new file mode 100644 index 000000000000..bb642cc5e18c --- /dev/null +++ b/drivers/pci/intr_remapping.c | |||
@@ -0,0 +1,471 @@ | |||
1 | #include <linux/dmar.h> | ||
2 | #include <linux/spinlock.h> | ||
3 | #include <linux/jiffies.h> | ||
4 | #include <linux/pci.h> | ||
5 | #include <linux/irq.h> | ||
6 | #include <asm/io_apic.h> | ||
7 | #include "intel-iommu.h" | ||
8 | #include "intr_remapping.h" | ||
9 | |||
10 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | ||
11 | static int ir_ioapic_num; | ||
12 | int intr_remapping_enabled; | ||
13 | |||
14 | static struct { | ||
15 | struct intel_iommu *iommu; | ||
16 | u16 irte_index; | ||
17 | u16 sub_handle; | ||
18 | u8 irte_mask; | ||
19 | } irq_2_iommu[NR_IRQS]; | ||
20 | |||
21 | static DEFINE_SPINLOCK(irq_2_ir_lock); | ||
22 | |||
23 | int irq_remapped(int irq) | ||
24 | { | ||
25 | if (irq > NR_IRQS) | ||
26 | return 0; | ||
27 | |||
28 | if (!irq_2_iommu[irq].iommu) | ||
29 | return 0; | ||
30 | |||
31 | return 1; | ||
32 | } | ||
33 | |||
34 | int get_irte(int irq, struct irte *entry) | ||
35 | { | ||
36 | int index; | ||
37 | |||
38 | if (!entry || irq > NR_IRQS) | ||
39 | return -1; | ||
40 | |||
41 | spin_lock(&irq_2_ir_lock); | ||
42 | if (!irq_2_iommu[irq].iommu) { | ||
43 | spin_unlock(&irq_2_ir_lock); | ||
44 | return -1; | ||
45 | } | ||
46 | |||
47 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | ||
48 | *entry = *(irq_2_iommu[irq].iommu->ir_table->base + index); | ||
49 | |||
50 | spin_unlock(&irq_2_ir_lock); | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | ||
55 | { | ||
56 | struct ir_table *table = iommu->ir_table; | ||
57 | u16 index, start_index; | ||
58 | unsigned int mask = 0; | ||
59 | int i; | ||
60 | |||
61 | if (!count) | ||
62 | return -1; | ||
63 | |||
64 | /* | ||
65 | * start the IRTE search from index 0. | ||
66 | */ | ||
67 | index = start_index = 0; | ||
68 | |||
69 | if (count > 1) { | ||
70 | count = __roundup_pow_of_two(count); | ||
71 | mask = ilog2(count); | ||
72 | } | ||
73 | |||
74 | if (mask > ecap_max_handle_mask(iommu->ecap)) { | ||
75 | printk(KERN_ERR | ||
76 | "Requested mask %x exceeds the max invalidation handle" | ||
77 | " mask value %Lx\n", mask, | ||
78 | ecap_max_handle_mask(iommu->ecap)); | ||
79 | return -1; | ||
80 | } | ||
81 | |||
82 | spin_lock(&irq_2_ir_lock); | ||
83 | do { | ||
84 | for (i = index; i < index + count; i++) | ||
85 | if (table->base[i].present) | ||
86 | break; | ||
87 | /* empty index found */ | ||
88 | if (i == index + count) | ||
89 | break; | ||
90 | |||
91 | index = (index + count) % INTR_REMAP_TABLE_ENTRIES; | ||
92 | |||
93 | if (index == start_index) { | ||
94 | spin_unlock(&irq_2_ir_lock); | ||
95 | printk(KERN_ERR "can't allocate an IRTE\n"); | ||
96 | return -1; | ||
97 | } | ||
98 | } while (1); | ||
99 | |||
100 | for (i = index; i < index + count; i++) | ||
101 | table->base[i].present = 1; | ||
102 | |||
103 | irq_2_iommu[irq].iommu = iommu; | ||
104 | irq_2_iommu[irq].irte_index = index; | ||
105 | irq_2_iommu[irq].sub_handle = 0; | ||
106 | irq_2_iommu[irq].irte_mask = mask; | ||
107 | |||
108 | spin_unlock(&irq_2_ir_lock); | ||
109 | |||
110 | return index; | ||
111 | } | ||
112 | |||
113 | static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask) | ||
114 | { | ||
115 | struct qi_desc desc; | ||
116 | |||
117 | desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) | ||
118 | | QI_IEC_SELECTIVE; | ||
119 | desc.high = 0; | ||
120 | |||
121 | qi_submit_sync(&desc, iommu); | ||
122 | } | ||
123 | |||
124 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) | ||
125 | { | ||
126 | int index; | ||
127 | |||
128 | spin_lock(&irq_2_ir_lock); | ||
129 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | ||
130 | spin_unlock(&irq_2_ir_lock); | ||
131 | return -1; | ||
132 | } | ||
133 | |||
134 | *sub_handle = irq_2_iommu[irq].sub_handle; | ||
135 | index = irq_2_iommu[irq].irte_index; | ||
136 | spin_unlock(&irq_2_ir_lock); | ||
137 | return index; | ||
138 | } | ||
139 | |||
140 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | ||
141 | { | ||
142 | spin_lock(&irq_2_ir_lock); | ||
143 | if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) { | ||
144 | spin_unlock(&irq_2_ir_lock); | ||
145 | return -1; | ||
146 | } | ||
147 | |||
148 | irq_2_iommu[irq].iommu = iommu; | ||
149 | irq_2_iommu[irq].irte_index = index; | ||
150 | irq_2_iommu[irq].sub_handle = subhandle; | ||
151 | irq_2_iommu[irq].irte_mask = 0; | ||
152 | |||
153 | spin_unlock(&irq_2_ir_lock); | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | ||
159 | { | ||
160 | spin_lock(&irq_2_ir_lock); | ||
161 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | ||
162 | spin_unlock(&irq_2_ir_lock); | ||
163 | return -1; | ||
164 | } | ||
165 | |||
166 | irq_2_iommu[irq].iommu = NULL; | ||
167 | irq_2_iommu[irq].irte_index = 0; | ||
168 | irq_2_iommu[irq].sub_handle = 0; | ||
169 | irq_2_iommu[irq].irte_mask = 0; | ||
170 | |||
171 | spin_unlock(&irq_2_ir_lock); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | int modify_irte(int irq, struct irte *irte_modified) | ||
177 | { | ||
178 | int index; | ||
179 | struct irte *irte; | ||
180 | struct intel_iommu *iommu; | ||
181 | |||
182 | spin_lock(&irq_2_ir_lock); | ||
183 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | ||
184 | spin_unlock(&irq_2_ir_lock); | ||
185 | return -1; | ||
186 | } | ||
187 | |||
188 | iommu = irq_2_iommu[irq].iommu; | ||
189 | |||
190 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | ||
191 | irte = &iommu->ir_table->base[index]; | ||
192 | |||
193 | set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); | ||
194 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | ||
195 | |||
196 | qi_flush_iec(iommu, index, 0); | ||
197 | |||
198 | spin_unlock(&irq_2_ir_lock); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | int flush_irte(int irq) | ||
203 | { | ||
204 | int index; | ||
205 | struct intel_iommu *iommu; | ||
206 | |||
207 | spin_lock(&irq_2_ir_lock); | ||
208 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | ||
209 | spin_unlock(&irq_2_ir_lock); | ||
210 | return -1; | ||
211 | } | ||
212 | |||
213 | iommu = irq_2_iommu[irq].iommu; | ||
214 | |||
215 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | ||
216 | |||
217 | qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask); | ||
218 | spin_unlock(&irq_2_ir_lock); | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | struct intel_iommu *map_ioapic_to_ir(int apic) | ||
224 | { | ||
225 | int i; | ||
226 | |||
227 | for (i = 0; i < MAX_IO_APICS; i++) | ||
228 | if (ir_ioapic[i].id == apic) | ||
229 | return ir_ioapic[i].iommu; | ||
230 | return NULL; | ||
231 | } | ||
232 | |||
233 | struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) | ||
234 | { | ||
235 | struct dmar_drhd_unit *drhd; | ||
236 | |||
237 | drhd = dmar_find_matched_drhd_unit(dev); | ||
238 | if (!drhd) | ||
239 | return NULL; | ||
240 | |||
241 | return drhd->iommu; | ||
242 | } | ||
243 | |||
244 | int free_irte(int irq) | ||
245 | { | ||
246 | int index, i; | ||
247 | struct irte *irte; | ||
248 | struct intel_iommu *iommu; | ||
249 | |||
250 | spin_lock(&irq_2_ir_lock); | ||
251 | if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) { | ||
252 | spin_unlock(&irq_2_ir_lock); | ||
253 | return -1; | ||
254 | } | ||
255 | |||
256 | iommu = irq_2_iommu[irq].iommu; | ||
257 | |||
258 | index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle; | ||
259 | irte = &iommu->ir_table->base[index]; | ||
260 | |||
261 | if (!irq_2_iommu[irq].sub_handle) { | ||
262 | for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++) | ||
263 | set_64bit((unsigned long *)irte, 0); | ||
264 | qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask); | ||
265 | } | ||
266 | |||
267 | irq_2_iommu[irq].iommu = NULL; | ||
268 | irq_2_iommu[irq].irte_index = 0; | ||
269 | irq_2_iommu[irq].sub_handle = 0; | ||
270 | irq_2_iommu[irq].irte_mask = 0; | ||
271 | |||
272 | spin_unlock(&irq_2_ir_lock); | ||
273 | |||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) | ||
278 | { | ||
279 | u64 addr; | ||
280 | u32 cmd, sts; | ||
281 | unsigned long flags; | ||
282 | |||
283 | addr = virt_to_phys((void *)iommu->ir_table->base); | ||
284 | |||
285 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
286 | |||
287 | dmar_writeq(iommu->reg + DMAR_IRTA_REG, | ||
288 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); | ||
289 | |||
290 | /* Set interrupt-remapping table pointer */ | ||
291 | cmd = iommu->gcmd | DMA_GCMD_SIRTP; | ||
292 | writel(cmd, iommu->reg + DMAR_GCMD_REG); | ||
293 | |||
294 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | ||
295 | readl, (sts & DMA_GSTS_IRTPS), sts); | ||
296 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
297 | |||
298 | /* | ||
299 | * global invalidation of interrupt entry cache before enabling | ||
300 | * interrupt-remapping. | ||
301 | */ | ||
302 | qi_global_iec(iommu); | ||
303 | |||
304 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
305 | |||
306 | /* Enable interrupt-remapping */ | ||
307 | cmd = iommu->gcmd | DMA_GCMD_IRE; | ||
308 | iommu->gcmd |= DMA_GCMD_IRE; | ||
309 | writel(cmd, iommu->reg + DMAR_GCMD_REG); | ||
310 | |||
311 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | ||
312 | readl, (sts & DMA_GSTS_IRES), sts); | ||
313 | |||
314 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
315 | } | ||
316 | |||
317 | |||
318 | static int setup_intr_remapping(struct intel_iommu *iommu, int mode) | ||
319 | { | ||
320 | struct ir_table *ir_table; | ||
321 | struct page *pages; | ||
322 | |||
323 | ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), | ||
324 | GFP_KERNEL); | ||
325 | |||
326 | if (!iommu->ir_table) | ||
327 | return -ENOMEM; | ||
328 | |||
329 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); | ||
330 | |||
331 | if (!pages) { | ||
332 | printk(KERN_ERR "failed to allocate pages of order %d\n", | ||
333 | INTR_REMAP_PAGE_ORDER); | ||
334 | kfree(iommu->ir_table); | ||
335 | return -ENOMEM; | ||
336 | } | ||
337 | |||
338 | ir_table->base = page_address(pages); | ||
339 | |||
340 | iommu_set_intr_remapping(iommu, mode); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | int __init enable_intr_remapping(int eim) | ||
345 | { | ||
346 | struct dmar_drhd_unit *drhd; | ||
347 | int setup = 0; | ||
348 | |||
349 | /* | ||
350 | * check for the Interrupt-remapping support | ||
351 | */ | ||
352 | for_each_drhd_unit(drhd) { | ||
353 | struct intel_iommu *iommu = drhd->iommu; | ||
354 | |||
355 | if (!ecap_ir_support(iommu->ecap)) | ||
356 | continue; | ||
357 | |||
358 | if (eim && !ecap_eim_support(iommu->ecap)) { | ||
359 | printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " | ||
360 | " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); | ||
361 | return -1; | ||
362 | } | ||
363 | } | ||
364 | |||
365 | /* | ||
366 | * Enable queued invalidation for all the DRHD's. | ||
367 | */ | ||
368 | for_each_drhd_unit(drhd) { | ||
369 | int ret; | ||
370 | struct intel_iommu *iommu = drhd->iommu; | ||
371 | ret = dmar_enable_qi(iommu); | ||
372 | |||
373 | if (ret) { | ||
374 | printk(KERN_ERR "DRHD %Lx: failed to enable queued, " | ||
375 | " invalidation, ecap %Lx, ret %d\n", | ||
376 | drhd->reg_base_addr, iommu->ecap, ret); | ||
377 | return -1; | ||
378 | } | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Setup Interrupt-remapping for all the DRHD's now. | ||
383 | */ | ||
384 | for_each_drhd_unit(drhd) { | ||
385 | struct intel_iommu *iommu = drhd->iommu; | ||
386 | |||
387 | if (!ecap_ir_support(iommu->ecap)) | ||
388 | continue; | ||
389 | |||
390 | if (setup_intr_remapping(iommu, eim)) | ||
391 | goto error; | ||
392 | |||
393 | setup = 1; | ||
394 | } | ||
395 | |||
396 | if (!setup) | ||
397 | goto error; | ||
398 | |||
399 | intr_remapping_enabled = 1; | ||
400 | |||
401 | return 0; | ||
402 | |||
403 | error: | ||
404 | /* | ||
405 | * handle error condition gracefully here! | ||
406 | */ | ||
407 | return -1; | ||
408 | } | ||
409 | |||
410 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | ||
411 | struct intel_iommu *iommu) | ||
412 | { | ||
413 | struct acpi_dmar_hardware_unit *drhd; | ||
414 | struct acpi_dmar_device_scope *scope; | ||
415 | void *start, *end; | ||
416 | |||
417 | drhd = (struct acpi_dmar_hardware_unit *)header; | ||
418 | |||
419 | start = (void *)(drhd + 1); | ||
420 | end = ((void *)drhd) + header->length; | ||
421 | |||
422 | while (start < end) { | ||
423 | scope = start; | ||
424 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { | ||
425 | if (ir_ioapic_num == MAX_IO_APICS) { | ||
426 | printk(KERN_WARNING "Exceeded Max IO APICS\n"); | ||
427 | return -1; | ||
428 | } | ||
429 | |||
430 | printk(KERN_INFO "IOAPIC id %d under DRHD base" | ||
431 | " 0x%Lx\n", scope->enumeration_id, | ||
432 | drhd->address); | ||
433 | |||
434 | ir_ioapic[ir_ioapic_num].iommu = iommu; | ||
435 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | ||
436 | ir_ioapic_num++; | ||
437 | } | ||
438 | start += scope->length; | ||
439 | } | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * Finds the association between IOAPICs and their Interrupt-remapping | ||
446 | * hardware unit. | ||
447 | */ | ||
448 | int __init parse_ioapics_under_ir(void) | ||
449 | { | ||
450 | struct dmar_drhd_unit *drhd; | ||
451 | int ir_supported = 0; | ||
452 | |||
453 | for_each_drhd_unit(drhd) { | ||
454 | struct intel_iommu *iommu = drhd->iommu; | ||
455 | |||
456 | if (ecap_ir_support(iommu->ecap)) { | ||
457 | if (ir_parse_ioapic_scope(drhd->hdr, iommu)) | ||
458 | return -1; | ||
459 | |||
460 | ir_supported = 1; | ||
461 | } | ||
462 | } | ||
463 | |||
464 | if (ir_supported && ir_ioapic_num != nr_ioapics) { | ||
465 | printk(KERN_WARNING | ||
466 | "Not all IO-APIC's listed under remapping hardware\n"); | ||
467 | return -1; | ||
468 | } | ||
469 | |||
470 | return ir_supported; | ||
471 | } | ||
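alloc_irte() above hands out IRTEs in power-of-two, naturally aligned runs so that a block of MSI vectors can later be addressed as base index plus sub_handle, and it scans the table in count-sized steps, wrapping around once before giving up. A self-contained sketch of that allocation policy (tiny table, userspace C; the power-of-two round-up is simplified compared with __roundup_pow_of_two()):

#include <stdio.h>

#define TABLE_ENTRIES 16	/* 65536 in the real driver */

static int present[TABLE_ENTRIES];

/*
 * Round count up to a power of two, then look for a fully free,
 * count-aligned run, stepping through the table count entries at a time,
 * exactly like the search loop in alloc_irte().
 */
static int alloc_entries(int count)
{
	int index = 0, start_index = 0, i;

	if (count < 1)
		return -1;
	while (count & (count - 1))
		count++;	/* naive round-up, fine for a sketch */

	do {
		for (i = index; i < index + count; i++)
			if (present[i])
				break;
		if (i == index + count)
			break;	/* free run found */

		index = (index + count) % TABLE_ENTRIES;
		if (index == start_index)
			return -1;	/* wrapped: table full */
	} while (1);

	for (i = index; i < index + count; i++)
		present[i] = 1;
	return index;
}

int main(void)
{
	/* expected output: 0 4 8 */
	printf("%d %d %d\n", alloc_entries(1), alloc_entries(3),
	       alloc_entries(4));
	return 0;
}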
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h new file mode 100644 index 000000000000..05f2635bbe4e --- /dev/null +++ b/drivers/pci/intr_remapping.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #include "intel-iommu.h" | ||
2 | |||
3 | struct ioapic_scope { | ||
4 | struct intel_iommu *iommu; | ||
5 | unsigned int id; | ||
6 | }; | ||
7 | |||
8 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) | ||
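IR_X2APIC_MODE() is the last piece iommu_set_intr_remapping() needs when it programs DMAR_IRTA_REG: the register takes the physical address of the interrupt remapping table, a mode bit selecting x2apic (32-bit destination IDs), and a size field (0xf here, i.e. 2^(15+1) = 65536 entries, which lines up with INTR_REMAP_TABLE_ENTRIES). A quick userspace sketch of that composition; the table address is a made-up, page-aligned value:

#include <stdio.h>
#include <stdint.h>

#define INTR_REMAP_TABLE_REG_SIZE	0xf	/* 2^(15+1) = 65536 entries */
#define IR_X2APIC_MODE(mode)		((mode) ? (1 << 11) : 0)

int main(void)
{
	/* hypothetical, page-aligned physical address of the remap table */
	uint64_t table_phys = 0x12340000ULL;
	int eim = 1;	/* extended (x2apic) interrupt mode */

	/*
	 * Value iommu_set_intr_remapping() writes to DMAR_IRTA_REG:
	 * table address | extended-interrupt-mode bit | size field.
	 */
	uint64_t irta = table_phys | IR_X2APIC_MODE(eim) |
			INTR_REMAP_TABLE_REG_SIZE;

	printf("IRTA = 0x%llx\n", (unsigned long long)irta);
	return 0;
}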
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index cb752ba72466..7440a0dceddb 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -385,6 +385,7 @@ | |||
385 | . = ALIGN(align); \ | 385 | . = ALIGN(align); \ |
386 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 386 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
387 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ | 387 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ |
388 | *(.data.percpu.page_aligned) \ | ||
388 | *(.data.percpu) \ | 389 | *(.data.percpu) \ |
389 | *(.data.percpu.shared_aligned) \ | 390 | *(.data.percpu.shared_aligned) \ |
390 | } \ | 391 | } \ |
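The one-line linker script change above makes .data.percpu.page_aligned the first input section inside the per-cpu output section, so page-aligned per-cpu variables land at the start instead of forcing padding in the middle of .data.percpu. As a rough illustration of steering a page-aligned variable into its own named section (plain userspace C with GCC attributes; the section name here is only illustrative, not the kernel's):

#include <stdio.h>

/*
 * A page-aligned variable placed in a dedicated section; the kernel uses a
 * similar attribute-based mechanism, and the linker script decides where
 * that section ends up relative to the rest of the per-cpu data.
 */
static char demo_page[4096]
	__attribute__((section(".data.demo.page_aligned"), aligned(4096)));

int main(void)
{
	printf("demo_page at %p\n", (void *)demo_page);
	return demo_page[0];
}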
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h index 714207a1c387..f5705761a37b 100644 --- a/include/asm-x86/a.out-core.h +++ b/include/asm-x86/a.out-core.h | |||
@@ -9,8 +9,8 @@ | |||
9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the Licence, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef _ASM_A_OUT_CORE_H | 12 | #ifndef ASM_X86__A_OUT_CORE_H |
13 | #define _ASM_A_OUT_CORE_H | 13 | #define ASM_X86__A_OUT_CORE_H |
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | #ifdef CONFIG_X86_32 | 16 | #ifdef CONFIG_X86_32 |
@@ -70,4 +70,4 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
70 | 70 | ||
71 | #endif /* CONFIG_X86_32 */ | 71 | #endif /* CONFIG_X86_32 */ |
72 | #endif /* __KERNEL__ */ | 72 | #endif /* __KERNEL__ */ |
73 | #endif /* _ASM_A_OUT_CORE_H */ | 73 | #endif /* ASM_X86__A_OUT_CORE_H */ |
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h index 4684f97a5bbd..0948748bc69c 100644 --- a/include/asm-x86/a.out.h +++ b/include/asm-x86/a.out.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_A_OUT_H | 1 | #ifndef ASM_X86__A_OUT_H |
2 | #define _ASM_X86_A_OUT_H | 2 | #define ASM_X86__A_OUT_H |
3 | 3 | ||
4 | struct exec | 4 | struct exec |
5 | { | 5 | { |
@@ -17,4 +17,4 @@ struct exec | |||
17 | #define N_DRSIZE(a) ((a).a_drsize) | 17 | #define N_DRSIZE(a) ((a).a_drsize) |
18 | #define N_SYMSIZE(a) ((a).a_syms) | 18 | #define N_SYMSIZE(a) ((a).a_syms) |
19 | 19 | ||
20 | #endif /* _ASM_X86_A_OUT_H */ | 20 | #endif /* ASM_X86__A_OUT_H */ |
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 635d764dc13e..bd76299586b3 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_ACPI_H | 1 | #ifndef ASM_X86__ACPI_H |
2 | #define _ASM_X86_ACPI_H | 2 | #define ASM_X86__ACPI_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | 5 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
@@ -173,4 +173,4 @@ static inline void acpi_fake_nodes(const struct bootnode *fake_nodes, | |||
173 | 173 | ||
174 | #define acpi_unlazy_tlb(x) leave_mm(x) | 174 | #define acpi_unlazy_tlb(x) leave_mm(x) |
175 | 175 | ||
176 | #endif /*__X86_ASM_ACPI_H*/ | 176 | #endif /* ASM_X86__ACPI_H */ |
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h index e4004a9f6a9a..3617fd4fcdf9 100644 --- a/include/asm-x86/agp.h +++ b/include/asm-x86/agp.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_AGP_H | 1 | #ifndef ASM_X86__AGP_H |
2 | #define _ASM_X86_AGP_H | 2 | #define ASM_X86__AGP_H |
3 | 3 | ||
4 | #include <asm/pgtable.h> | 4 | #include <asm/pgtable.h> |
5 | #include <asm/cacheflush.h> | 5 | #include <asm/cacheflush.h> |
@@ -32,4 +32,4 @@ | |||
32 | #define free_gatt_pages(table, order) \ | 32 | #define free_gatt_pages(table, order) \ |
33 | free_pages((unsigned long)(table), (order)) | 33 | free_pages((unsigned long)(table), (order)) |
34 | 34 | ||
35 | #endif | 35 | #endif /* ASM_X86__AGP_H */ |
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index f6aa18eadf71..22d3c9862bf3 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_ALTERNATIVE_H | 1 | #ifndef ASM_X86__ALTERNATIVE_H |
2 | #define _ASM_X86_ALTERNATIVE_H | 2 | #define ASM_X86__ALTERNATIVE_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
@@ -180,4 +180,4 @@ extern void add_nops(void *insns, unsigned int len); | |||
180 | extern void *text_poke(void *addr, const void *opcode, size_t len); | 180 | extern void *text_poke(void *addr, const void *opcode, size_t len); |
181 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | 181 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); |
182 | 182 | ||
183 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 183 | #endif /* ASM_X86__ALTERNATIVE_H */ |
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h index 30a12049353b..783f43e58052 100644 --- a/include/asm-x86/amd_iommu.h +++ b/include/asm-x86/amd_iommu.h | |||
@@ -17,8 +17,8 @@ | |||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef _ASM_X86_AMD_IOMMU_H | 20 | #ifndef ASM_X86__AMD_IOMMU_H |
21 | #define _ASM_X86_AMD_IOMMU_H | 21 | #define ASM_X86__AMD_IOMMU_H |
22 | 22 | ||
23 | #ifdef CONFIG_AMD_IOMMU | 23 | #ifdef CONFIG_AMD_IOMMU |
24 | extern int amd_iommu_init(void); | 24 | extern int amd_iommu_init(void); |
@@ -29,4 +29,4 @@ static inline int amd_iommu_init(void) { return -ENODEV; } | |||
29 | static inline void amd_iommu_detect(void) { } | 29 | static inline void amd_iommu_detect(void) { } |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #endif | 32 | #endif /* ASM_X86__AMD_IOMMU_H */ |
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h index dcc812067394..1ffa4e53c989 100644 --- a/include/asm-x86/amd_iommu_types.h +++ b/include/asm-x86/amd_iommu_types.h | |||
@@ -17,8 +17,8 @@ | |||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef __AMD_IOMMU_TYPES_H__ | 20 | #ifndef ASM_X86__AMD_IOMMU_TYPES_H |
21 | #define __AMD_IOMMU_TYPES_H__ | 21 | #define ASM_X86__AMD_IOMMU_TYPES_H |
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
@@ -341,4 +341,4 @@ static inline u16 calc_devid(u8 bus, u8 devfn) | |||
341 | return (((u16)bus) << 8) | devfn; | 341 | return (((u16)bus) << 8) | devfn; |
342 | } | 342 | } |
343 | 343 | ||
344 | #endif | 344 | #endif /* ASM_X86__AMD_IOMMU_TYPES_H */ |
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index 133c998161ca..1311c82b165b 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_APIC_H | 1 | #ifndef ASM_X86__APIC_H |
2 | #define _ASM_X86_APIC_H | 2 | #define ASM_X86__APIC_H |
3 | 3 | ||
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/apicdef.h> | 9 | #include <asm/apicdef.h> |
10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
11 | #include <asm/system.h> | 11 | #include <asm/system.h> |
12 | #include <asm/cpufeature.h> | ||
13 | #include <asm/msr.h> | ||
12 | 14 | ||
13 | #define ARCH_APICTIMER_STOPS_ON_C3 1 | 15 | #define ARCH_APICTIMER_STOPS_ON_C3 1 |
14 | 16 | ||
@@ -47,15 +49,18 @@ extern int disable_apic; | |||
47 | #ifdef CONFIG_PARAVIRT | 49 | #ifdef CONFIG_PARAVIRT |
48 | #include <asm/paravirt.h> | 50 | #include <asm/paravirt.h> |
49 | #else | 51 | #else |
50 | #define apic_write native_apic_write | ||
51 | #define apic_read native_apic_read | ||
52 | #define setup_boot_clock setup_boot_APIC_clock | 52 | #define setup_boot_clock setup_boot_APIC_clock |
53 | #define setup_secondary_clock setup_secondary_APIC_clock | 53 | #define setup_secondary_clock setup_secondary_APIC_clock |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | extern int is_vsmp_box(void); | 56 | extern int is_vsmp_box(void); |
57 | extern void xapic_wait_icr_idle(void); | ||
58 | extern u32 safe_xapic_wait_icr_idle(void); | ||
59 | extern u64 xapic_icr_read(void); | ||
60 | extern void xapic_icr_write(u32, u32); | ||
61 | extern int setup_profiling_timer(unsigned int); | ||
57 | 62 | ||
58 | static inline void native_apic_write(unsigned long reg, u32 v) | 63 | static inline void native_apic_mem_write(u32 reg, u32 v) |
59 | { | 64 | { |
60 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); | 65 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); |
61 | 66 | ||
@@ -64,15 +69,68 @@ static inline void native_apic_write(unsigned long reg, u32 v) | |||
64 | ASM_OUTPUT2("0" (v), "m" (*addr))); | 69 | ASM_OUTPUT2("0" (v), "m" (*addr))); |
65 | } | 70 | } |
66 | 71 | ||
67 | static inline u32 native_apic_read(unsigned long reg) | 72 | static inline u32 native_apic_mem_read(u32 reg) |
68 | { | 73 | { |
69 | return *((volatile u32 *)(APIC_BASE + reg)); | 74 | return *((volatile u32 *)(APIC_BASE + reg)); |
70 | } | 75 | } |
71 | 76 | ||
72 | extern void apic_wait_icr_idle(void); | 77 | static inline void native_apic_msr_write(u32 reg, u32 v) |
73 | extern u32 safe_apic_wait_icr_idle(void); | 78 | { |
79 | if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || | ||
80 | reg == APIC_LVR) | ||
81 | return; | ||
82 | |||
83 | wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); | ||
84 | } | ||
85 | |||
86 | static inline u32 native_apic_msr_read(u32 reg) | ||
87 | { | ||
88 | u32 low, high; | ||
89 | |||
90 | if (reg == APIC_DFR) | ||
91 | return -1; | ||
92 | |||
93 | rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); | ||
94 | return low; | ||
95 | } | ||
96 | |||
97 | #ifndef CONFIG_X86_32 | ||
98 | extern int x2apic, x2apic_preenabled; | ||
99 | extern void check_x2apic(void); | ||
100 | extern void enable_x2apic(void); | ||
101 | extern void enable_IR_x2apic(void); | ||
102 | extern void x2apic_icr_write(u32 low, u32 id); | ||
103 | #endif | ||
104 | |||
105 | struct apic_ops { | ||
106 | u32 (*read)(u32 reg); | ||
107 | void (*write)(u32 reg, u32 v); | ||
108 | u64 (*icr_read)(void); | ||
109 | void (*icr_write)(u32 low, u32 high); | ||
110 | void (*wait_icr_idle)(void); | ||
111 | u32 (*safe_wait_icr_idle)(void); | ||
112 | }; | ||
113 | |||
114 | extern struct apic_ops *apic_ops; | ||
115 | |||
116 | #define apic_read (apic_ops->read) | ||
117 | #define apic_write (apic_ops->write) | ||
118 | #define apic_icr_read (apic_ops->icr_read) | ||
119 | #define apic_icr_write (apic_ops->icr_write) | ||
120 | #define apic_wait_icr_idle (apic_ops->wait_icr_idle) | ||
121 | #define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle) | ||
122 | |||
74 | extern int get_physical_broadcast(void); | 123 | extern int get_physical_broadcast(void); |
75 | 124 | ||
125 | #ifdef CONFIG_X86_64 | ||
126 | static inline void ack_x2APIC_irq(void) | ||
127 | { | ||
128 | /* Docs say use 0 for future compatibility */ | ||
129 | native_apic_msr_write(APIC_EOI, 0); | ||
130 | } | ||
131 | #endif | ||
132 | |||
133 | |||
76 | static inline void ack_APIC_irq(void) | 134 | static inline void ack_APIC_irq(void) |
77 | { | 135 | { |
78 | /* | 136 | /* |
@@ -128,4 +186,4 @@ static inline void init_apic_mappings(void) { } | |||
128 | 186 | ||
129 | #endif /* !CONFIG_X86_LOCAL_APIC */ | 187 | #endif /* !CONFIG_X86_LOCAL_APIC */ |
130 | 188 | ||
131 | #endif /* __ASM_APIC_H */ | 189 | #endif /* ASM_X86__APIC_H */ |
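With the accessors split into native_apic_mem_*() and native_apic_msr_*() variants, callers go through the apic_ops function-pointer table behind the apic_read()/apic_write() macros, so switching between MMIO (xAPIC) and MSR-based (x2APIC) register access is a single pointer swap at boot. As native_apic_msr_write() shows, the MSR index for a register is simply APIC_BASE_MSR plus the MMIO offset shifted right by four. A small user-space sketch of that mapping (register offsets as defined in apicdef.h):

#include <stdio.h>

#define APIC_BASE_MSR   0x800
#define APIC_TASKPRI    0x80    /* MMIO offset of the task-priority register */
#define APIC_EOI        0xB0    /* MMIO offset of the EOI register           */

/* x2APIC maps each 16-byte MMIO register slot to one MSR. */
static unsigned int x2apic_msr(unsigned int mmio_offset)
{
        return APIC_BASE_MSR + (mmio_offset >> 4);
}

int main(void)
{
        printf("TPR -> MSR 0x%x\n", x2apic_msr(APIC_TASKPRI));  /* 0x808 */
        printf("EOI -> MSR 0x%x\n", x2apic_msr(APIC_EOI));      /* 0x80b */
        return 0;
}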
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 6b9008c78731..b922c85ac91d 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_APICDEF_H | 1 | #ifndef ASM_X86__APICDEF_H |
2 | #define _ASM_X86_APICDEF_H | 2 | #define ASM_X86__APICDEF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) | 5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) |
@@ -105,6 +105,7 @@ | |||
105 | #define APIC_TMICT 0x380 | 105 | #define APIC_TMICT 0x380 |
106 | #define APIC_TMCCT 0x390 | 106 | #define APIC_TMCCT 0x390 |
107 | #define APIC_TDCR 0x3E0 | 107 | #define APIC_TDCR 0x3E0 |
108 | #define APIC_SELF_IPI 0x3F0 | ||
108 | #define APIC_TDR_DIV_TMBASE (1 << 2) | 109 | #define APIC_TDR_DIV_TMBASE (1 << 2) |
109 | #define APIC_TDR_DIV_1 0xB | 110 | #define APIC_TDR_DIV_1 0xB |
110 | #define APIC_TDR_DIV_2 0x0 | 111 | #define APIC_TDR_DIV_2 0x0 |
@@ -128,6 +129,8 @@ | |||
128 | #define APIC_EILVT3 0x530 | 129 | #define APIC_EILVT3 0x530 |
129 | 130 | ||
130 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | 131 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) |
132 | #define APIC_BASE_MSR 0x800 | ||
133 | #define X2APIC_ENABLE (1UL << 10) | ||
131 | 134 | ||
132 | #ifdef CONFIG_X86_32 | 135 | #ifdef CONFIG_X86_32 |
133 | # define MAX_IO_APICS 64 | 136 | # define MAX_IO_APICS 64 |
@@ -411,4 +414,4 @@ struct local_apic { | |||
411 | #else | 414 | #else |
412 | #define BAD_APICID 0xFFFFu | 415 | #define BAD_APICID 0xFFFFu |
413 | #endif | 416 | #endif |
414 | #endif | 417 | #endif /* ASM_X86__APICDEF_H */ |
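APIC_BASE_MSR and X2APIC_ENABLE added here name the x2APIC MSR window base (0x800) and the x2APIC-enable bit (bit 10 of IA32_APIC_BASE, MSR 0x1B). Whether a running CPU has been switched into x2APIC mode can be checked from user space through the msr driver; a hedged sketch (requires the msr module and root, paths and error handling kept minimal):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_APICBASE       0x1b
#define X2APIC_ENABLE           (1UL << 10)

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;
        /* The msr driver uses the file offset as the MSR index. */
        if (pread(fd, &val, sizeof(val), MSR_IA32_APICBASE) != sizeof(val)) {
                close(fd);
                return 1;
        }
        close(fd);

        printf("x2APIC mode %s\n", (val & X2APIC_ENABLE) ? "enabled" : "disabled");
        return 0;
}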
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h index 8411750ceb63..de4596b24c23 100644 --- a/include/asm-x86/arch_hooks.h +++ b/include/asm-x86/arch_hooks.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_ARCH_HOOKS_H | 1 | #ifndef ASM_X86__ARCH_HOOKS_H |
2 | #define _ASM_ARCH_HOOKS_H | 2 | #define ASM_X86__ARCH_HOOKS_H |
3 | 3 | ||
4 | #include <linux/interrupt.h> | 4 | #include <linux/interrupt.h> |
5 | 5 | ||
@@ -12,8 +12,6 @@ | |||
12 | /* these aren't arch hooks, they are generic routines | 12 | /* these aren't arch hooks, they are generic routines |
13 | * that can be used by the hooks */ | 13 | * that can be used by the hooks */ |
14 | extern void init_ISA_irqs(void); | 14 | extern void init_ISA_irqs(void); |
15 | extern void apic_intr_init(void); | ||
16 | extern void smp_intr_init(void); | ||
17 | extern irqreturn_t timer_interrupt(int irq, void *dev_id); | 15 | extern irqreturn_t timer_interrupt(int irq, void *dev_id); |
18 | 16 | ||
19 | /* these are the defined hooks */ | 17 | /* these are the defined hooks */ |
@@ -25,4 +23,4 @@ extern void pre_time_init_hook(void); | |||
25 | extern void time_init_hook(void); | 23 | extern void time_init_hook(void); |
26 | extern void mca_nmi_hook(void); | 24 | extern void mca_nmi_hook(void); |
27 | 25 | ||
28 | #endif | 26 | #endif /* ASM_X86__ARCH_HOOKS_H */ |
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h index 97220321f39d..2439ae49e8ac 100644 --- a/include/asm-x86/asm.h +++ b/include/asm-x86/asm.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_ASM_H | 1 | #ifndef ASM_X86__ASM_H |
2 | #define _ASM_X86_ASM_H | 2 | #define ASM_X86__ASM_H |
3 | 3 | ||
4 | #ifdef __ASSEMBLY__ | 4 | #ifdef __ASSEMBLY__ |
5 | # define __ASM_FORM(x) x | 5 | # define __ASM_FORM(x) x |
@@ -39,4 +39,4 @@ | |||
39 | _ASM_PTR #from "," #to "\n" \ | 39 | _ASM_PTR #from "," #to "\n" \ |
40 | " .previous\n" | 40 | " .previous\n" |
41 | 41 | ||
42 | #endif /* _ASM_X86_ASM_H */ | 42 | #endif /* ASM_X86__ASM_H */ |
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h index 21a4825148c0..14d3f0beb889 100644 --- a/include/asm-x86/atomic_32.h +++ b/include/asm-x86/atomic_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ARCH_I386_ATOMIC__ | 1 | #ifndef ASM_X86__ATOMIC_32_H |
2 | #define __ARCH_I386_ATOMIC__ | 2 | #define ASM_X86__ATOMIC_32_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
@@ -256,4 +256,4 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
256 | #define smp_mb__after_atomic_inc() barrier() | 256 | #define smp_mb__after_atomic_inc() barrier() |
257 | 257 | ||
258 | #include <asm-generic/atomic.h> | 258 | #include <asm-generic/atomic.h> |
259 | #endif | 259 | #endif /* ASM_X86__ATOMIC_32_H */ |
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h index 91c7d03e65bc..2cb218c4a356 100644 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ARCH_X86_64_ATOMIC__ | 1 | #ifndef ASM_X86__ATOMIC_64_H |
2 | #define __ARCH_X86_64_ATOMIC__ | 2 | #define ASM_X86__ATOMIC_64_H |
3 | 3 | ||
4 | #include <asm/alternative.h> | 4 | #include <asm/alternative.h> |
5 | #include <asm/cmpxchg.h> | 5 | #include <asm/cmpxchg.h> |
@@ -470,4 +470,4 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | |||
470 | #define smp_mb__after_atomic_inc() barrier() | 470 | #define smp_mb__after_atomic_inc() barrier() |
471 | 471 | ||
472 | #include <asm-generic/atomic.h> | 472 | #include <asm-generic/atomic.h> |
473 | #endif | 473 | #endif /* ASM_X86__ATOMIC_64_H */ |
diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h index 87f5e6d5a020..12c7cac74202 100644 --- a/include/asm-x86/auxvec.h +++ b/include/asm-x86/auxvec.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_AUXVEC_H | 1 | #ifndef ASM_X86__AUXVEC_H |
2 | #define _ASM_X86_AUXVEC_H | 2 | #define ASM_X86__AUXVEC_H |
3 | /* | 3 | /* |
4 | * Architecture-neutral AT_ values in 0-17, leave some room | 4 | * Architecture-neutral AT_ values in 0-17, leave some room |
5 | * for more of them, start the x86-specific ones at 32. | 5 | * for more of them, start the x86-specific ones at 32. |
@@ -9,4 +9,4 @@ | |||
9 | #endif | 9 | #endif |
10 | #define AT_SYSINFO_EHDR 33 | 10 | #define AT_SYSINFO_EHDR 33 |
11 | 11 | ||
12 | #endif | 12 | #endif /* ASM_X86__AUXVEC_H */ |
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/bigsmp/apic.h index c3b9dc6970c9..0a9cd7c5ca0c 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/bigsmp/apic.h | |||
@@ -11,7 +11,7 @@ static inline int apic_id_registered(void) | |||
11 | 11 | ||
12 | /* Round robin the irqs among the online cpus */ | 12 | /* Round robin the irqs among the online cpus */
13 | static inline cpumask_t target_cpus(void) | 13 | static inline cpumask_t target_cpus(void) |
14 | { | 14 | { |
15 | static unsigned long cpu = NR_CPUS; | 15 | static unsigned long cpu = NR_CPUS; |
16 | do { | 16 | do { |
17 | if (cpu >= NR_CPUS) | 17 | if (cpu >= NR_CPUS) |
@@ -23,7 +23,7 @@ static inline cpumask_t target_cpus(void) | |||
23 | } | 23 | } |
24 | 24 | ||
25 | #undef APIC_DEST_LOGICAL | 25 | #undef APIC_DEST_LOGICAL |
26 | #define APIC_DEST_LOGICAL 0 | 26 | #define APIC_DEST_LOGICAL 0 |
27 | #define TARGET_CPUS (target_cpus()) | 27 | #define TARGET_CPUS (target_cpus()) |
28 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | 28 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) |
29 | #define INT_DELIVERY_MODE (dest_Fixed) | 29 | #define INT_DELIVERY_MODE (dest_Fixed) |
diff --git a/include/asm-x86/mach-bigsmp/mach_apicdef.h b/include/asm-x86/bigsmp/apicdef.h index a58ab5a75c8c..392c3f5ef2fe 100644 --- a/include/asm-x86/mach-bigsmp/mach_apicdef.h +++ b/include/asm-x86/bigsmp/apicdef.h | |||
@@ -3,10 +3,10 @@ | |||
3 | 3 | ||
4 | #define APIC_ID_MASK (0xFF<<24) | 4 | #define APIC_ID_MASK (0xFF<<24) |
5 | 5 | ||
6 | static inline unsigned get_apic_id(unsigned long x) | 6 | static inline unsigned get_apic_id(unsigned long x) |
7 | { | 7 | { |
8 | return (((x)>>24)&0xFF); | 8 | return (((x)>>24)&0xFF); |
9 | } | 9 | } |
10 | 10 | ||
11 | #define GET_APIC_ID(x) get_apic_id(x) | 11 | #define GET_APIC_ID(x) get_apic_id(x) |
12 | 12 | ||
diff --git a/include/asm-x86/mach-bigsmp/mach_ipi.h b/include/asm-x86/bigsmp/ipi.h index 9404c535b7ec..9404c535b7ec 100644 --- a/include/asm-x86/mach-bigsmp/mach_ipi.h +++ b/include/asm-x86/bigsmp/ipi.h | |||
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h index 0033e50c13b2..ec42ed874591 100644 --- a/include/asm-x86/bios_ebda.h +++ b/include/asm-x86/bios_ebda.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _MACH_BIOS_EBDA_H | 1 | #ifndef ASM_X86__BIOS_EBDA_H |
2 | #define _MACH_BIOS_EBDA_H | 2 | #define ASM_X86__BIOS_EBDA_H |
3 | 3 | ||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | 5 | ||
@@ -16,4 +16,4 @@ static inline unsigned int get_bios_ebda(void) | |||
16 | 16 | ||
17 | void reserve_ebda_region(void); | 17 | void reserve_ebda_region(void); |
18 | 18 | ||
19 | #endif /* _MACH_BIOS_EBDA_H */ | 19 | #endif /* ASM_X86__BIOS_EBDA_H */ |
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index cfb2b64f76e7..61989b93b475 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_BITOPS_H | 1 | #ifndef ASM_X86__BITOPS_H |
2 | #define _ASM_X86_BITOPS_H | 2 | #define ASM_X86__BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
@@ -458,4 +458,4 @@ static inline void set_bit_string(unsigned long *bitmap, | |||
458 | #include <asm-generic/bitops/minix.h> | 458 | #include <asm-generic/bitops/minix.h> |
459 | 459 | ||
460 | #endif /* __KERNEL__ */ | 460 | #endif /* __KERNEL__ */ |
461 | #endif /* _ASM_X86_BITOPS_H */ | 461 | #endif /* ASM_X86__BITOPS_H */ |
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h index 2faed7ecb092..825de5dc867c 100644 --- a/include/asm-x86/boot.h +++ b/include/asm-x86/boot.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_BOOT_H | 1 | #ifndef ASM_X86__BOOT_H |
2 | #define _ASM_BOOT_H | 2 | #define ASM_X86__BOOT_H |
3 | 3 | ||
4 | /* Don't touch these, unless you really know what you're doing. */ | 4 | /* Don't touch these, unless you really know what you're doing. */ |
5 | #define DEF_INITSEG 0x9000 | 5 | #define DEF_INITSEG 0x9000 |
@@ -25,4 +25,4 @@ | |||
25 | #define BOOT_STACK_SIZE 0x1000 | 25 | #define BOOT_STACK_SIZE 0x1000 |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #endif /* _ASM_BOOT_H */ | 28 | #endif /* ASM_X86__BOOT_H */ |
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h index ae22bdf0ab14..ccf027e2d97d 100644 --- a/include/asm-x86/bootparam.h +++ b/include/asm-x86/bootparam.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_BOOTPARAM_H | 1 | #ifndef ASM_X86__BOOTPARAM_H |
2 | #define _ASM_BOOTPARAM_H | 2 | #define ASM_X86__BOOTPARAM_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/screen_info.h> | 5 | #include <linux/screen_info.h> |
@@ -108,4 +108,4 @@ struct boot_params { | |||
108 | __u8 _pad9[276]; /* 0xeec */ | 108 | __u8 _pad9[276]; /* 0xeec */ |
109 | } __attribute__((packed)); | 109 | } __attribute__((packed)); |
110 | 110 | ||
111 | #endif /* _ASM_BOOTPARAM_H */ | 111 | #endif /* ASM_X86__BOOTPARAM_H */ |
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index b69aa64b82a4..91ad43a54c47 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_BUG_H | 1 | #ifndef ASM_X86__BUG_H |
2 | #define _ASM_X86_BUG_H | 2 | #define ASM_X86__BUG_H |
3 | 3 | ||
4 | #ifdef CONFIG_BUG | 4 | #ifdef CONFIG_BUG |
5 | #define HAVE_ARCH_BUG | 5 | #define HAVE_ARCH_BUG |
@@ -36,4 +36,4 @@ do { \ | |||
36 | #endif /* !CONFIG_BUG */ | 36 | #endif /* !CONFIG_BUG */ |
37 | 37 | ||
38 | #include <asm-generic/bug.h> | 38 | #include <asm-generic/bug.h> |
39 | #endif | 39 | #endif /* ASM_X86__BUG_H */ |
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h index 021cbdd5f258..ae514c76a96f 100644 --- a/include/asm-x86/bugs.h +++ b/include/asm-x86/bugs.h | |||
@@ -1,7 +1,12 @@ | |||
1 | #ifndef _ASM_X86_BUGS_H | 1 | #ifndef ASM_X86__BUGS_H |
2 | #define _ASM_X86_BUGS_H | 2 | #define ASM_X86__BUGS_H |
3 | 3 | ||
4 | extern void check_bugs(void); | 4 | extern void check_bugs(void); |
5 | |||
6 | #ifdef CONFIG_CPU_SUP_INTEL_32 | ||
5 | int ppro_with_ram_bug(void); | 7 | int ppro_with_ram_bug(void); |
8 | #else | ||
9 | static inline int ppro_with_ram_bug(void) { return 0; } | ||
10 | #endif | ||
6 | 11 | ||
7 | #endif /* _ASM_X86_BUGS_H */ | 12 | #endif /* ASM_X86__BUGS_H */ |
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h index e02ae2d89acf..722f27d68105 100644 --- a/include/asm-x86/byteorder.h +++ b/include/asm-x86/byteorder.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_BYTEORDER_H | 1 | #ifndef ASM_X86__BYTEORDER_H |
2 | #define _ASM_X86_BYTEORDER_H | 2 | #define ASM_X86__BYTEORDER_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
@@ -78,4 +78,4 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) | |||
78 | 78 | ||
79 | #include <linux/byteorder/little_endian.h> | 79 | #include <linux/byteorder/little_endian.h> |
80 | 80 | ||
81 | #endif /* _ASM_X86_BYTEORDER_H */ | 81 | #endif /* ASM_X86__BYTEORDER_H */ |
diff --git a/include/asm-x86/cache.h b/include/asm-x86/cache.h index 1e0bac86f38f..ea3f1cc06a97 100644 --- a/include/asm-x86/cache.h +++ b/include/asm-x86/cache.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ARCH_X86_CACHE_H | 1 | #ifndef ASM_X86__CACHE_H |
2 | #define _ARCH_X86_CACHE_H | 2 | #define ASM_X86__CACHE_H |
3 | 3 | ||
4 | /* L1 cache line size */ | 4 | /* L1 cache line size */ |
5 | #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) | 5 | #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) |
@@ -17,4 +17,4 @@ | |||
17 | #endif | 17 | #endif |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #endif | 20 | #endif /* ASM_X86__CACHE_H */ |
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index f4c0ab50d2c2..59859cb28a36 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_CACHEFLUSH_H | 1 | #ifndef ASM_X86__CACHEFLUSH_H |
2 | #define _ASM_X86_CACHEFLUSH_H | 2 | #define ASM_X86__CACHEFLUSH_H |
3 | 3 | ||
4 | /* Keep includes the same across arches. */ | 4 | /* Keep includes the same across arches. */ |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
@@ -112,4 +112,4 @@ static inline int rodata_test(void) | |||
112 | } | 112 | } |
113 | #endif | 113 | #endif |
114 | 114 | ||
115 | #endif | 115 | #endif /* ASM_X86__CACHEFLUSH_H */ |
diff --git a/include/asm-x86/calgary.h b/include/asm-x86/calgary.h index 67f60406e2d8..933fd272f826 100644 --- a/include/asm-x86/calgary.h +++ b/include/asm-x86/calgary.h | |||
@@ -21,8 +21,8 @@ | |||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifndef _ASM_X86_64_CALGARY_H | 24 | #ifndef ASM_X86__CALGARY_H |
25 | #define _ASM_X86_64_CALGARY_H | 25 | #define ASM_X86__CALGARY_H |
26 | 26 | ||
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
@@ -69,4 +69,4 @@ static inline int calgary_iommu_init(void) { return 1; } | |||
69 | static inline void detect_calgary(void) { return; } | 69 | static inline void detect_calgary(void) { return; } |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | #endif /* _ASM_X86_64_CALGARY_H */ | 72 | #endif /* ASM_X86__CALGARY_H */ |
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h index 52bbb0d8c4c1..d041e8cda227 100644 --- a/include/asm-x86/checksum_32.h +++ b/include/asm-x86/checksum_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_CHECKSUM_H | 1 | #ifndef ASM_X86__CHECKSUM_32_H |
2 | #define _I386_CHECKSUM_H | 2 | #define ASM_X86__CHECKSUM_32_H |
3 | 3 | ||
4 | #include <linux/in6.h> | 4 | #include <linux/in6.h> |
5 | 5 | ||
@@ -186,4 +186,4 @@ static inline __wsum csum_and_copy_to_user(const void *src, | |||
186 | return (__force __wsum)-1; /* invalid checksum */ | 186 | return (__force __wsum)-1; /* invalid checksum */ |
187 | } | 187 | } |
188 | 188 | ||
189 | #endif | 189 | #endif /* ASM_X86__CHECKSUM_32_H */ |
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index 8bd861cc5267..110f403beb89 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_64_CHECKSUM_H | 1 | #ifndef ASM_X86__CHECKSUM_64_H |
2 | #define _X86_64_CHECKSUM_H | 2 | #define ASM_X86__CHECKSUM_64_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Checksums for x86-64 | 5 | * Checksums for x86-64 |
@@ -188,4 +188,4 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b) | |||
188 | return a; | 188 | return a; |
189 | } | 189 | } |
190 | 190 | ||
191 | #endif | 191 | #endif /* ASM_X86__CHECKSUM_64_H */ |
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index bf5a69d1329e..0622e45cdf7c 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_CMPXCHG_H | 1 | #ifndef ASM_X86__CMPXCHG_32_H |
2 | #define __ASM_CMPXCHG_H | 2 | #define ASM_X86__CMPXCHG_32_H |
3 | 3 | ||
4 | #include <linux/bitops.h> /* for LOCK_PREFIX */ | 4 | #include <linux/bitops.h> /* for LOCK_PREFIX */ |
5 | 5 | ||
@@ -341,4 +341,4 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); | |||
341 | 341 | ||
342 | #endif | 342 | #endif |
343 | 343 | ||
344 | #endif | 344 | #endif /* ASM_X86__CMPXCHG_32_H */ |
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h index 17463ccf8166..63c1a5e61b99 100644 --- a/include/asm-x86/cmpxchg_64.h +++ b/include/asm-x86/cmpxchg_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_CMPXCHG_H | 1 | #ifndef ASM_X86__CMPXCHG_64_H |
2 | #define __ASM_CMPXCHG_H | 2 | #define ASM_X86__CMPXCHG_64_H |
3 | 3 | ||
4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ | 4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
5 | 5 | ||
@@ -182,4 +182,4 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
182 | cmpxchg_local((ptr), (o), (n)); \ | 182 | cmpxchg_local((ptr), (o), (n)); \ |
183 | }) | 183 | }) |
184 | 184 | ||
185 | #endif | 185 | #endif /* ASM_X86__CMPXCHG_64_H */ |
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index 1793ac317a30..6732b150949e 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_COMPAT_H | 1 | #ifndef ASM_X86__COMPAT_H |
2 | #define _ASM_X86_64_COMPAT_H | 2 | #define ASM_X86__COMPAT_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Architecture specific compatibility types | 5 | * Architecture specific compatibility types |
@@ -215,4 +215,4 @@ static inline int is_compat_task(void) | |||
215 | return current_thread_info()->status & TS_COMPAT; | 215 | return current_thread_info()->status & TS_COMPAT; |
216 | } | 216 | } |
217 | 217 | ||
218 | #endif /* _ASM_X86_64_COMPAT_H */ | 218 | #endif /* ASM_X86__COMPAT_H */ |
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h index 73f2ea84fd74..83a115083f0d 100644 --- a/include/asm-x86/cpu.h +++ b/include/asm-x86/cpu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_I386_CPU_H_ | 1 | #ifndef ASM_X86__CPU_H |
2 | #define _ASM_I386_CPU_H_ | 2 | #define ASM_X86__CPU_H |
3 | 3 | ||
4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
5 | #include <linux/cpu.h> | 5 | #include <linux/cpu.h> |
@@ -17,4 +17,4 @@ extern void arch_unregister_cpu(int); | |||
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | DECLARE_PER_CPU(int, cpu_state); | 19 | DECLARE_PER_CPU(int, cpu_state); |
20 | #endif /* _ASM_I386_CPU_H_ */ | 20 | #endif /* ASM_X86__CPU_H */ |
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 762f6a6bc707..7ac4d93d20ed 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
@@ -1,20 +1,26 @@ | |||
1 | /* | 1 | /* |
2 | * Defines x86 CPU feature bits | 2 | * Defines x86 CPU feature bits |
3 | */ | 3 | */ |
4 | #ifndef _ASM_X86_CPUFEATURE_H | 4 | #ifndef ASM_X86__CPUFEATURE_H |
5 | #define _ASM_X86_CPUFEATURE_H | 5 | #define ASM_X86__CPUFEATURE_H |
6 | 6 | ||
7 | #include <asm/required-features.h> | 7 | #include <asm/required-features.h> |
8 | 8 | ||
9 | #define NCAPINTS 8 /* N 32-bit words worth of info */ | 9 | #define NCAPINTS 8 /* N 32-bit words worth of info */ |
10 | 10 | ||
11 | /* | ||
12 | * Note: If the comment begins with a quoted string, that string is used | ||
13 | * in /proc/cpuinfo instead of the macro name. If the string is "", | ||
14 | * this feature bit is not displayed in /proc/cpuinfo at all. | ||
15 | */ | ||
16 | |||
11 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | 17 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
12 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | 18 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
13 | #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ | 19 | #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ |
14 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | 20 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
15 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | 21 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
16 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | 22 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
17 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ | 23 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
18 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | 24 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
19 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ | 25 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ |
20 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | 26 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
@@ -23,22 +29,23 @@ | |||
23 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | 29 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
24 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | 30 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
25 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | 31 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
26 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ | 32 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
33 | /* (plus FCMOVcc, FCOMI with FPU) */ | ||
27 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | 34 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
28 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 35 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
29 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 36 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
30 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | 37 | #define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
31 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ | 38 | #define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
32 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 39 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
33 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 40 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
34 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | 41 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
35 | /* of FPU context), and CR4.OSFXSR available */ | 42 | #define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
36 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | 43 | #define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
37 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | 44 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
38 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ | ||
39 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | 45 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
40 | #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ | 46 | #define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
41 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | 47 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
48 | #define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ | ||
42 | 49 | ||
43 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | 50 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
44 | /* Don't duplicate feature flags which are redundant with Intel! */ | 51 | /* Don't duplicate feature flags which are redundant with Intel! */ |
@@ -46,7 +53,8 @@ | |||
46 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | 53 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
47 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | 54 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
48 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | 55 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
49 | #define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ | 56 | #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
57 | #define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ | ||
50 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ | 58 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
51 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | 59 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
52 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | 60 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
@@ -64,51 +72,77 @@ | |||
64 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | 72 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
65 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | 73 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
66 | /* cpu types for specific tunings: */ | 74 | /* cpu types for specific tunings: */ |
67 | #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ | 75 | #define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
68 | #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ | 76 | #define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
69 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | 77 | #define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
70 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | 78 | #define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
71 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | 79 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
72 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 80 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
73 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | 81 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
74 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 82 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
75 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | 83 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
76 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | 84 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
77 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ | 85 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
78 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ | 86 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | 87 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ | 88 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | 89 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ | 90 | #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
91 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | ||
92 | #define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */ | ||
83 | 93 | ||
84 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 94 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
85 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 95 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
86 | #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ | 96 | #define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
87 | #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ | 97 | #define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
98 | #define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ | ||
99 | #define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ | ||
100 | #define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ | ||
101 | #define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ | ||
88 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | 102 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
89 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | 103 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
104 | #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ | ||
90 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | 105 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ |
106 | #define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ | ||
91 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | 107 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
92 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | 108 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
109 | #define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ | ||
93 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ | 110 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
94 | #define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */ | 111 | #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
112 | #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ | ||
113 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ | ||
114 | #define X86_FEATURE_AES (4*32+25) /* AES instructions */ | ||
115 | #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ | ||
116 | #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ | ||
117 | #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ | ||
95 | 118 | ||
96 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | 119 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
97 | #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ | 120 | #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
98 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ | 121 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
99 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ | 122 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
100 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ | 123 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
101 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ | 124 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
102 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ | 125 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
103 | #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ | 126 | #define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
104 | #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ | 127 | #define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
105 | #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ | 128 | #define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
106 | #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ | 129 | #define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
107 | 130 | ||
108 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | 131 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
109 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | 132 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
110 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ | 133 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
111 | #define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */ | 134 | #define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
135 | #define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ | ||
136 | #define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ | ||
137 | #define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ | ||
138 | #define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ | ||
139 | #define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ | ||
140 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ | ||
141 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ | ||
142 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ | ||
143 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ | ||
144 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | ||
145 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | ||
112 | 146 | ||
113 | /* | 147 | /* |
114 | * Auxiliary flags: Linux defined - For features scattered in various | 148 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -149,7 +183,7 @@ extern const char * const x86_power_flags[32]; | |||
149 | } while (0) | 183 | } while (0) |
150 | #define setup_force_cpu_cap(bit) do { \ | 184 | #define setup_force_cpu_cap(bit) do { \ |
151 | set_cpu_cap(&boot_cpu_data, bit); \ | 185 | set_cpu_cap(&boot_cpu_data, bit); \ |
152 | clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ | 186 | clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ |
153 | } while (0) | 187 | } while (0) |
154 | 188 | ||
155 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | 189 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
@@ -190,7 +224,10 @@ extern const char * const x86_power_flags[32]; | |||
190 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) | 224 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
191 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) | 225 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
192 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) | 226 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
227 | #define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) | ||
193 | #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) | 228 | #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
229 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) | ||
230 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) | ||
194 | 231 | ||
195 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 232 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
196 | # define cpu_has_invlpg 1 | 233 | # define cpu_has_invlpg 1 |
@@ -222,4 +259,4 @@ extern const char * const x86_power_flags[32]; | |||
222 | 259 | ||
223 | #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ | 260 | #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
224 | 261 | ||
225 | #endif /* _ASM_X86_CPUFEATURE_H */ | 262 | #endif /* ASM_X86__CPUFEATURE_H */ |
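Capability word 4 corresponds to CPUID leaf 1's ECX, so the new X86_FEATURE_X2APIC (bit 21) and X86_FEATURE_XSAVE (bit 26) entries, and the matching cpu_has_x2apic/cpu_has_xsave conveniences, mirror bits that can also be read directly; a small sketch using GCC's cpuid.h:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        printf("x2apic: %s\n", (ecx & (1u << 21)) ? "yes" : "no");  /* X86_FEATURE_X2APIC */
        printf("xsave : %s\n", (ecx & (1u << 26)) ? "yes" : "no");  /* X86_FEATURE_XSAVE  */
        return 0;
}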
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h index 7515c19d4988..a863ead856f3 100644 --- a/include/asm-x86/current.h +++ b/include/asm-x86/current.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_CURRENT_H | 1 | #ifndef ASM_X86__CURRENT_H |
2 | #define _X86_CURRENT_H | 2 | #define ASM_X86__CURRENT_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
@@ -36,4 +36,4 @@ static __always_inline struct task_struct *get_current(void) | |||
36 | 36 | ||
37 | #define current get_current() | 37 | #define current get_current() |
38 | 38 | ||
39 | #endif /* X86_CURRENT_H */ | 39 | #endif /* ASM_X86__CURRENT_H */ |
diff --git a/include/asm-x86/debugreg.h b/include/asm-x86/debugreg.h index c6344d572b03..ecb6907c3ea4 100644 --- a/include/asm-x86/debugreg.h +++ b/include/asm-x86/debugreg.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_DEBUGREG_H | 1 | #ifndef ASM_X86__DEBUGREG_H |
2 | #define _ASM_X86_DEBUGREG_H | 2 | #define ASM_X86__DEBUGREG_H |
3 | 3 | ||
4 | 4 | ||
5 | /* Indicate the register numbers for a number of the specific | 5 | /* Indicate the register numbers for a number of the specific |
@@ -67,4 +67,4 @@ | |||
67 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ | 67 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ |
68 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ | 68 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ |
69 | 69 | ||
70 | #endif | 70 | #endif /* ASM_X86__DEBUGREG_H */ |
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h index 409a649204aa..8a0da95b4fc5 100644 --- a/include/asm-x86/delay.h +++ b/include/asm-x86/delay.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_DELAY_H | 1 | #ifndef ASM_X86__DELAY_H |
2 | #define _ASM_X86_DELAY_H | 2 | #define ASM_X86__DELAY_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright (C) 1993 Linus Torvalds | 5 | * Copyright (C) 1993 Linus Torvalds |
@@ -28,4 +28,4 @@ extern void __delay(unsigned long loops); | |||
28 | 28 | ||
29 | void use_tsc_delay(void); | 29 | void use_tsc_delay(void); |
30 | 30 | ||
31 | #endif /* _ASM_X86_DELAY_H */ | 31 | #endif /* ASM_X86__DELAY_H */ |
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index a44c4dc70590..b73fea54def2 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_DESC_H_ | 1 | #ifndef ASM_X86__DESC_H |
2 | #define _ASM_DESC_H_ | 2 | #define ASM_X86__DESC_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #include <asm/desc_defs.h> | 5 | #include <asm/desc_defs.h> |
@@ -397,4 +397,4 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist) | |||
397 | 397 | ||
398 | #endif /* __ASSEMBLY__ */ | 398 | #endif /* __ASSEMBLY__ */ |
399 | 399 | ||
400 | #endif | 400 | #endif /* ASM_X86__DESC_H */ |
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index f7bacf357dac..b881db664b46 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Written 2000 by Andi Kleen */ | 1 | /* Written 2000 by Andi Kleen */ |
2 | #ifndef __ARCH_DESC_DEFS_H | 2 | #ifndef ASM_X86__DESC_DEFS_H |
3 | #define __ARCH_DESC_DEFS_H | 3 | #define ASM_X86__DESC_DEFS_H |
4 | 4 | ||
5 | /* | 5 | /* |
6 | * Segment descriptor structure definitions, usable from both x86_64 and i386 | 6 | * Segment descriptor structure definitions, usable from both x86_64 and i386 |
@@ -92,4 +92,4 @@ struct desc_ptr { | |||
92 | 92 | ||
93 | #endif /* !__ASSEMBLY__ */ | 93 | #endif /* !__ASSEMBLY__ */ |
94 | 94 | ||
95 | #endif | 95 | #endif /* ASM_X86__DESC_DEFS_H */ |
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h index 3c034f48fdb0..1bece04c7d9d 100644 --- a/include/asm-x86/device.h +++ b/include/asm-x86/device.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_DEVICE_H | 1 | #ifndef ASM_X86__DEVICE_H |
2 | #define _ASM_X86_DEVICE_H | 2 | #define ASM_X86__DEVICE_H |
3 | 3 | ||
4 | struct dev_archdata { | 4 | struct dev_archdata { |
5 | #ifdef CONFIG_ACPI | 5 | #ifdef CONFIG_ACPI |
@@ -13,4 +13,4 @@ struct dma_mapping_ops *dma_ops; | |||
13 | #endif | 13 | #endif |
14 | }; | 14 | }; |
15 | 15 | ||
16 | #endif /* _ASM_X86_DEVICE_H */ | 16 | #endif /* ASM_X86__DEVICE_H */ |
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h index 9a2d644c08ef..f9530f23f1d6 100644 --- a/include/asm-x86/div64.h +++ b/include/asm-x86/div64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_DIV64_H | 1 | #ifndef ASM_X86__DIV64_H |
2 | #define _ASM_X86_DIV64_H | 2 | #define ASM_X86__DIV64_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | 5 | ||
@@ -57,4 +57,4 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) | |||
57 | # include <asm-generic/div64.h> | 57 | # include <asm-generic/div64.h> |
58 | #endif /* CONFIG_X86_32 */ | 58 | #endif /* CONFIG_X86_32 */ |
59 | 59 | ||
60 | #endif /* _ASM_X86_DIV64_H */ | 60 | #endif /* ASM_X86__DIV64_H */ |
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index ad9cd6d49bfc..5d200e78bd81 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_DMA_MAPPING_H_ | 1 | #ifndef ASM_X86__DMA_MAPPING_H |
2 | #define _ASM_DMA_MAPPING_H_ | 2 | #define ASM_X86__DMA_MAPPING_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for | 5 | * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for |
@@ -250,4 +250,4 @@ static inline int dma_get_cache_alignment(void) | |||
250 | #define dma_is_consistent(d, h) (1) | 250 | #define dma_is_consistent(d, h) (1) |
251 | 251 | ||
252 | #include <asm-generic/dma-coherent.h> | 252 | #include <asm-generic/dma-coherent.h> |
253 | #endif | 253 | #endif /* ASM_X86__DMA_MAPPING_H */ |
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index ca1098a7e580..c9f7a4eec555 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h | |||
@@ -5,8 +5,8 @@ | |||
5 | * and John Boyd, Nov. 1992. | 5 | * and John Boyd, Nov. 1992. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _ASM_X86_DMA_H | 8 | #ifndef ASM_X86__DMA_H |
9 | #define _ASM_X86_DMA_H | 9 | #define ASM_X86__DMA_H |
10 | 10 | ||
11 | #include <linux/spinlock.h> /* And spinlocks */ | 11 | #include <linux/spinlock.h> /* And spinlocks */ |
12 | #include <asm/io.h> /* need byte IO */ | 12 | #include <asm/io.h> /* need byte IO */ |
@@ -315,4 +315,4 @@ extern int isa_dma_bridge_buggy; | |||
315 | #define isa_dma_bridge_buggy (0) | 315 | #define isa_dma_bridge_buggy (0) |
316 | #endif | 316 | #endif |
317 | 317 | ||
318 | #endif /* _ASM_X86_DMA_H */ | 318 | #endif /* ASM_X86__DMA_H */ |
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h index 58a86571fe0f..1cff6fe81fa5 100644 --- a/include/asm-x86/dmi.h +++ b/include/asm-x86/dmi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_DMI_H | 1 | #ifndef ASM_X86__DMI_H |
2 | #define _ASM_X86_DMI_H | 2 | #define ASM_X86__DMI_H |
3 | 3 | ||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | 5 | ||
@@ -23,4 +23,4 @@ static inline void *dmi_alloc(unsigned len) | |||
23 | #define dmi_ioremap early_ioremap | 23 | #define dmi_ioremap early_ioremap |
24 | #define dmi_iounmap early_iounmap | 24 | #define dmi_iounmap early_iounmap |
25 | 25 | ||
26 | #endif | 26 | #endif /* ASM_X86__DMI_H */ |
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h index 7881368142fa..6b27c686fa10 100644 --- a/include/asm-x86/ds.h +++ b/include/asm-x86/ds.h | |||
@@ -17,8 +17,8 @@ | |||
17 | * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 | 17 | * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef _ASM_X86_DS_H | 20 | #ifndef ASM_X86__DS_H |
21 | #define _ASM_X86_DS_H | 21 | #define ASM_X86__DS_H |
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
@@ -69,4 +69,4 @@ extern int ds_write_bts(void *, const struct bts_struct *); | |||
69 | extern unsigned long ds_debugctl_mask(void); | 69 | extern unsigned long ds_debugctl_mask(void); |
70 | extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c); | 70 | extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c); |
71 | 71 | ||
72 | #endif /* _ASM_X86_DS_H */ | 72 | #endif /* ASM_X86__DS_H */ |
diff --git a/include/asm-x86/dwarf2.h b/include/asm-x86/dwarf2.h index 738bb9fb3e53..21d1bc32ad7c 100644 --- a/include/asm-x86/dwarf2.h +++ b/include/asm-x86/dwarf2.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _DWARF2_H | 1 | #ifndef ASM_X86__DWARF2_H |
2 | #define _DWARF2_H | 2 | #define ASM_X86__DWARF2_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #warning "asm/dwarf2.h should be only included in pure assembly files" | 5 | #warning "asm/dwarf2.h should be only included in pure assembly files" |
@@ -58,4 +58,4 @@ | |||
58 | 58 | ||
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #endif | 61 | #endif /* ASM_X86__DWARF2_H */ |
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h index 16a31e2c7c57..5abbdec06bd2 100644 --- a/include/asm-x86/e820.h +++ b/include/asm-x86/e820.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_E820_H | 1 | #ifndef ASM_X86__E820_H |
2 | #define __ASM_E820_H | 2 | #define ASM_X86__E820_H |
3 | #define E820MAP 0x2d0 /* our map */ | 3 | #define E820MAP 0x2d0 /* our map */ |
4 | #define E820MAX 128 /* number of entries in E820MAP */ | 4 | #define E820MAX 128 /* number of entries in E820MAP */ |
5 | 5 | ||
@@ -43,6 +43,7 @@ | |||
43 | #define E820_RESERVED 2 | 43 | #define E820_RESERVED 2 |
44 | #define E820_ACPI 3 | 44 | #define E820_ACPI 3 |
45 | #define E820_NVS 4 | 45 | #define E820_NVS 4 |
46 | #define E820_UNUSABLE 5 | ||
46 | 47 | ||
47 | /* reserved RAM used by kernel itself */ | 48 | /* reserved RAM used by kernel itself */ |
48 | #define E820_RESERVED_KERN 128 | 49 | #define E820_RESERVED_KERN 128 |
@@ -64,6 +65,7 @@ struct e820map { | |||
64 | extern struct e820map e820; | 65 | extern struct e820map e820; |
65 | extern struct e820map e820_saved; | 66 | extern struct e820map e820_saved; |
66 | 67 | ||
68 | extern unsigned long pci_mem_start; | ||
67 | extern int e820_any_mapped(u64 start, u64 end, unsigned type); | 69 | extern int e820_any_mapped(u64 start, u64 end, unsigned type); |
68 | extern int e820_all_mapped(u64 start, u64 end, unsigned type); | 70 | extern int e820_all_mapped(u64 start, u64 end, unsigned type); |
69 | extern void e820_add_region(u64 start, u64 size, int type); | 71 | extern void e820_add_region(u64 start, u64 size, int type); |
@@ -120,6 +122,7 @@ extern void e820_register_active_regions(int nid, unsigned long start_pfn, | |||
120 | extern u64 e820_hole_size(u64 start, u64 end); | 122 | extern u64 e820_hole_size(u64 start, u64 end); |
121 | extern void finish_e820_parsing(void); | 123 | extern void finish_e820_parsing(void); |
122 | extern void e820_reserve_resources(void); | 124 | extern void e820_reserve_resources(void); |
125 | extern void e820_reserve_resources_late(void); | ||
123 | extern void setup_memory_map(void); | 126 | extern void setup_memory_map(void); |
124 | extern char *default_machine_specific_memory_setup(void); | 127 | extern char *default_machine_specific_memory_setup(void); |
125 | extern char *machine_specific_memory_setup(void); | 128 | extern char *machine_specific_memory_setup(void); |
@@ -140,4 +143,4 @@ extern char *memory_setup(void); | |||
140 | #define HIGH_MEMORY (1024*1024) | 143 | #define HIGH_MEMORY (1024*1024) |
141 | #endif /* __KERNEL__ */ | 144 | #endif /* __KERNEL__ */ |
142 | 145 | ||
143 | #endif /* __ASM_E820_H */ | 146 | #endif /* ASM_X86__E820_H */ |
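Note: a minimal, self-contained sketch (not part of this patch) of how the E820_* type codes above, including the new E820_UNUSABLE, map to the conventional BIOS e820 range names; E820_RAM (type 1) is assumed from the unchanged part of the header.

static const char *e820_type_name(unsigned type)
{
	switch (type) {
	case 1:  return "usable RAM";	/* E820_RAM */
	case 2:  return "reserved";	/* E820_RESERVED */
	case 3:  return "ACPI data";	/* E820_ACPI */
	case 4:  return "ACPI NVS";	/* E820_NVS */
	case 5:  return "unusable";	/* E820_UNUSABLE, added here */
	default: return "unknown";
	}
}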
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h index a8088f63a30e..9493c5b27bbd 100644 --- a/include/asm-x86/edac.h +++ b/include/asm-x86/edac.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_EDAC_H | 1 | #ifndef ASM_X86__EDAC_H |
2 | #define _ASM_X86_EDAC_H | 2 | #define ASM_X86__EDAC_H |
3 | 3 | ||
4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | 4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ |
5 | 5 | ||
@@ -15,4 +15,4 @@ static inline void atomic_scrub(void *va, u32 size) | |||
15 | asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); | 15 | asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); |
16 | } | 16 | } |
17 | 17 | ||
18 | #endif | 18 | #endif /* ASM_X86__EDAC_H */ |
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index d4f2b0abe929..ed2de22e8705 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_EFI_H | 1 | #ifndef ASM_X86__EFI_H |
2 | #define _ASM_X86_EFI_H | 2 | #define ASM_X86__EFI_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | 5 | ||
@@ -94,4 +94,4 @@ extern void efi_reserve_early(void); | |||
94 | extern void efi_call_phys_prelog(void); | 94 | extern void efi_call_phys_prelog(void); |
95 | extern void efi_call_phys_epilog(void); | 95 | extern void efi_call_phys_epilog(void); |
96 | 96 | ||
97 | #endif | 97 | #endif /* ASM_X86__EFI_H */ |
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index 7be4733c793e..cd678b2d6a74 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_ELF_H | 1 | #ifndef ASM_X86__ELF_H |
2 | #define _ASM_X86_ELF_H | 2 | #define ASM_X86__ELF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * ELF register definitions.. | 5 | * ELF register definitions.. |
@@ -332,4 +332,4 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack); | |||
332 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); | 332 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); |
333 | #define arch_randomize_brk arch_randomize_brk | 333 | #define arch_randomize_brk arch_randomize_brk |
334 | 334 | ||
335 | #endif | 335 | #endif /* ASM_X86__ELF_H */ |
diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h index 8e6aef19f8f0..190d0d8b71e3 100644 --- a/include/asm-x86/emergency-restart.h +++ b/include/asm-x86/emergency-restart.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | 1 | #ifndef ASM_X86__EMERGENCY_RESTART_H |
2 | #define _ASM_EMERGENCY_RESTART_H | 2 | #define ASM_X86__EMERGENCY_RESTART_H |
3 | 3 | ||
4 | enum reboot_type { | 4 | enum reboot_type { |
5 | BOOT_TRIPLE = 't', | 5 | BOOT_TRIPLE = 't', |
@@ -15,4 +15,4 @@ extern enum reboot_type reboot_type; | |||
15 | 15 | ||
16 | extern void machine_emergency_restart(void); | 16 | extern void machine_emergency_restart(void); |
17 | 17 | ||
18 | #endif /* _ASM_EMERGENCY_RESTART_H */ | 18 | #endif /* ASM_X86__EMERGENCY_RESTART_H */ |
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/es7000/apic.h index 0a3fdf930672..bd2c44d1f7ac 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/es7000/apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_ES7000_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_ES7000_APIC_H |
3 | 3 | ||
4 | #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) | 4 | #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) |
5 | #define esr_disable (1) | 5 | #define esr_disable (1) |
@@ -10,7 +10,7 @@ static inline int apic_id_registered(void) | |||
10 | } | 10 | } |
11 | 11 | ||
12 | static inline cpumask_t target_cpus(void) | 12 | static inline cpumask_t target_cpus(void) |
13 | { | 13 | { |
14 | #if defined CONFIG_ES7000_CLUSTERED_APIC | 14 | #if defined CONFIG_ES7000_CLUSTERED_APIC |
15 | return CPU_MASK_ALL; | 15 | return CPU_MASK_ALL; |
16 | #else | 16 | #else |
@@ -23,24 +23,24 @@ static inline cpumask_t target_cpus(void) | |||
23 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 23 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
24 | #define INT_DELIVERY_MODE (dest_LowestPrio) | 24 | #define INT_DELIVERY_MODE (dest_LowestPrio) |
25 | #define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ | 25 | #define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ |
26 | #define NO_BALANCE_IRQ (1) | 26 | #define NO_BALANCE_IRQ (1) |
27 | #undef WAKE_SECONDARY_VIA_INIT | 27 | #undef WAKE_SECONDARY_VIA_INIT |
28 | #define WAKE_SECONDARY_VIA_MIP | 28 | #define WAKE_SECONDARY_VIA_MIP |
29 | #else | 29 | #else |
30 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | 30 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) |
31 | #define INT_DELIVERY_MODE (dest_Fixed) | 31 | #define INT_DELIVERY_MODE (dest_Fixed) |
32 | #define INT_DEST_MODE (0) /* phys delivery to target procs */ | 32 | #define INT_DEST_MODE (0) /* phys delivery to target procs */ |
33 | #define NO_BALANCE_IRQ (0) | 33 | #define NO_BALANCE_IRQ (0) |
34 | #undef APIC_DEST_LOGICAL | 34 | #undef APIC_DEST_LOGICAL |
35 | #define APIC_DEST_LOGICAL 0x0 | 35 | #define APIC_DEST_LOGICAL 0x0 |
36 | #define WAKE_SECONDARY_VIA_INIT | 36 | #define WAKE_SECONDARY_VIA_INIT |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | 39 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
40 | { | 40 | { |
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | static inline unsigned long check_apicid_present(int bit) | 43 | static inline unsigned long check_apicid_present(int bit) |
44 | { | 44 | { |
45 | return physid_isset(bit, phys_cpu_present_map); | 45 | return physid_isset(bit, phys_cpu_present_map); |
46 | } | 46 | } |
@@ -80,7 +80,7 @@ static inline void setup_apic_routing(void) | |||
80 | { | 80 | { |
81 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); | 81 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); |
82 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | 82 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", |
83 | (apic_version[apic] == 0x14) ? | 83 | (apic_version[apic] == 0x14) ? |
84 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); | 84 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); |
85 | } | 85 | } |
86 | 86 | ||
@@ -141,7 +141,7 @@ static inline void setup_portio_remap(void) | |||
141 | extern unsigned int boot_cpu_physical_apicid; | 141 | extern unsigned int boot_cpu_physical_apicid; |
142 | static inline int check_phys_apicid_present(int cpu_physical_apicid) | 142 | static inline int check_phys_apicid_present(int cpu_physical_apicid) |
143 | { | 143 | { |
144 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 144 | boot_cpu_physical_apicid = read_apic_id(); |
145 | return (1); | 145 | return (1); |
146 | } | 146 | } |
147 | 147 | ||
@@ -150,7 +150,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
150 | int num_bits_set; | 150 | int num_bits_set; |
151 | int cpus_found = 0; | 151 | int cpus_found = 0; |
152 | int cpu; | 152 | int cpu; |
153 | int apicid; | 153 | int apicid; |
154 | 154 | ||
155 | num_bits_set = cpus_weight(cpumask); | 155 | num_bits_set = cpus_weight(cpumask); |
156 | /* Return id to all */ | 156 | /* Return id to all */ |
@@ -160,16 +160,16 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
160 | #else | 160 | #else |
161 | return cpu_to_logical_apicid(0); | 161 | return cpu_to_logical_apicid(0); |
162 | #endif | 162 | #endif |
163 | /* | 163 | /* |
164 | * The cpus in the mask must all be on the apic cluster. If are not | 164 | * The cpus in the mask must all be on the apic cluster. If are not |
165 | * on the same apicid cluster return default value of TARGET_CPUS. | 165 | * on the same apicid cluster return default value of TARGET_CPUS. |
166 | */ | 166 | */ |
167 | cpu = first_cpu(cpumask); | 167 | cpu = first_cpu(cpumask); |
168 | apicid = cpu_to_logical_apicid(cpu); | 168 | apicid = cpu_to_logical_apicid(cpu); |
169 | while (cpus_found < num_bits_set) { | 169 | while (cpus_found < num_bits_set) { |
170 | if (cpu_isset(cpu, cpumask)) { | 170 | if (cpu_isset(cpu, cpumask)) { |
171 | int new_apicid = cpu_to_logical_apicid(cpu); | 171 | int new_apicid = cpu_to_logical_apicid(cpu); |
172 | if (apicid_cluster(apicid) != | 172 | if (apicid_cluster(apicid) != |
173 | apicid_cluster(new_apicid)){ | 173 | apicid_cluster(new_apicid)){ |
174 | printk ("%s: Not a valid mask!\n",__FUNCTION__); | 174 | printk ("%s: Not a valid mask!\n",__FUNCTION__); |
175 | #if defined CONFIG_ES7000_CLUSTERED_APIC | 175 | #if defined CONFIG_ES7000_CLUSTERED_APIC |
@@ -191,4 +191,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
191 | return cpuid_apic >> index_msb; | 191 | return cpuid_apic >> index_msb; |
192 | } | 192 | } |
193 | 193 | ||
194 | #endif /* __ASM_MACH_APIC_H */ | 194 | #endif /* __ASM_ES7000_APIC_H */ |
diff --git a/include/asm-x86/es7000/apicdef.h b/include/asm-x86/es7000/apicdef.h new file mode 100644 index 000000000000..8b234a3cb851 --- /dev/null +++ b/include/asm-x86/es7000/apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_ES7000_APICDEF_H | ||
2 | #define __ASM_ES7000_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/mach-es7000/mach_ipi.h b/include/asm-x86/es7000/ipi.h index 5e61bd220b06..632a955fcc0a 100644 --- a/include/asm-x86/mach-es7000/mach_ipi.h +++ b/include/asm-x86/es7000/ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | 1 | #ifndef __ASM_ES7000_IPI_H |
2 | #define __ASM_MACH_IPI_H | 2 | #define __ASM_ES7000_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); |
5 | 5 | ||
@@ -21,4 +21,4 @@ static inline void send_IPI_all(int vector) | |||
21 | send_IPI_mask(cpu_online_map, vector); | 21 | send_IPI_mask(cpu_online_map, vector); |
22 | } | 22 | } |
23 | 23 | ||
24 | #endif /* __ASM_MACH_IPI_H */ | 24 | #endif /* __ASM_ES7000_IPI_H */ |
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/es7000/mpparse.h index ef26d3523625..7b5c889d8e7d 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/es7000/mpparse.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_ES7000_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_ES7000_MPPARSE_H |
3 | 3 | ||
4 | #include <linux/acpi.h> | 4 | #include <linux/acpi.h> |
5 | 5 | ||
diff --git a/include/asm-x86/mach-es7000/mach_wakecpu.h b/include/asm-x86/es7000/wakecpu.h index 84ff58314501..3ffc5a7bf667 100644 --- a/include/asm-x86/mach-es7000/mach_wakecpu.h +++ b/include/asm-x86/es7000/wakecpu.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_MACH_WAKECPU_H | 1 | #ifndef __ASM_ES7000_WAKECPU_H |
2 | #define __ASM_MACH_WAKECPU_H | 2 | #define __ASM_ES7000_WAKECPU_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file copes with machines that wakeup secondary CPUs by the | 5 | * This file copes with machines that wakeup secondary CPUs by the |
6 | * INIT, INIT, STARTUP sequence. | 6 | * INIT, INIT, STARTUP sequence. |
7 | */ | 7 | */ |
diff --git a/include/asm-x86/fb.h b/include/asm-x86/fb.h index 53018464aea6..aca38dbd9a64 100644 --- a/include/asm-x86/fb.h +++ b/include/asm-x86/fb.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_FB_H | 1 | #ifndef ASM_X86__FB_H |
2 | #define _ASM_X86_FB_H | 2 | #define ASM_X86__FB_H |
3 | 3 | ||
4 | #include <linux/fb.h> | 4 | #include <linux/fb.h> |
5 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
@@ -18,4 +18,4 @@ extern int fb_is_primary_device(struct fb_info *info); | |||
18 | static inline int fb_is_primary_device(struct fb_info *info) { return 0; } | 18 | static inline int fb_is_primary_device(struct fb_info *info) { return 0; } |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #endif /* _ASM_X86_FB_H */ | 21 | #endif /* ASM_X86__FB_H */ |
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h index 44d4f8217349..78e33a1bc591 100644 --- a/include/asm-x86/fixmap.h +++ b/include/asm-x86/fixmap.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_FIXMAP_H | 1 | #ifndef ASM_X86__FIXMAP_H |
2 | #define _ASM_FIXMAP_H | 2 | #define ASM_X86__FIXMAP_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | # include "fixmap_32.h" | 5 | # include "fixmap_32.h" |
@@ -65,4 +65,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) | |||
65 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | 65 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); |
66 | return __virt_to_fix(vaddr); | 66 | return __virt_to_fix(vaddr); |
67 | } | 67 | } |
68 | #endif | 68 | #endif /* ASM_X86__FIXMAP_H */ |
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index f1ac2b2167d7..784e3e759866 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h | |||
@@ -10,8 +10,8 @@ | |||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | 10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef _ASM_FIXMAP_32_H | 13 | #ifndef ASM_X86__FIXMAP_32_H |
14 | #define _ASM_FIXMAP_32_H | 14 | #define ASM_X86__FIXMAP_32_H |
15 | 15 | ||
16 | 16 | ||
17 | /* used by vmalloc.c, vsyscall.lds.S. | 17 | /* used by vmalloc.c, vsyscall.lds.S. |
@@ -120,4 +120,4 @@ extern void reserve_top_address(unsigned long reserve); | |||
120 | #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) | 120 | #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) |
121 | 121 | ||
122 | #endif /* !__ASSEMBLY__ */ | 122 | #endif /* !__ASSEMBLY__ */ |
123 | #endif | 123 | #endif /* ASM_X86__FIXMAP_32_H */ |
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index 00f3d74a0524..dafb24bc0424 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h | |||
@@ -8,8 +8,8 @@ | |||
8 | * Copyright (C) 1998 Ingo Molnar | 8 | * Copyright (C) 1998 Ingo Molnar |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _ASM_FIXMAP_64_H | 11 | #ifndef ASM_X86__FIXMAP_64_H |
12 | #define _ASM_FIXMAP_64_H | 12 | #define ASM_X86__FIXMAP_64_H |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <asm/acpi.h> | 15 | #include <asm/acpi.h> |
@@ -80,4 +80,4 @@ enum fixed_addresses { | |||
80 | #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) | 80 | #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) |
81 | #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) | 81 | #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) |
82 | 82 | ||
83 | #endif | 83 | #endif /* ASM_X86__FIXMAP_64_H */ |
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index dbe82a5c5eac..7d83a3a83e37 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h | |||
@@ -7,8 +7,8 @@ | |||
7 | * | 7 | * |
8 | * Copyright (C) 1995 | 8 | * Copyright (C) 1995 |
9 | */ | 9 | */ |
10 | #ifndef _ASM_X86_FLOPPY_H | 10 | #ifndef ASM_X86__FLOPPY_H |
11 | #define _ASM_X86_FLOPPY_H | 11 | #define ASM_X86__FLOPPY_H |
12 | 12 | ||
13 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
14 | 14 | ||
@@ -278,4 +278,4 @@ static int FDC2 = -1; | |||
278 | 278 | ||
279 | #define EXTRA_FLOPPY_PARAMS | 279 | #define EXTRA_FLOPPY_PARAMS |
280 | 280 | ||
281 | #endif /* _ASM_X86_FLOPPY_H */ | 281 | #endif /* ASM_X86__FLOPPY_H */ |
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h index 5c68b32ee1c8..be0e004ad148 100644 --- a/include/asm-x86/ftrace.h +++ b/include/asm-x86/ftrace.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_FTRACE | 1 | #ifndef ASM_X86__FTRACE_H |
2 | #define _ASM_X86_FTRACE | 2 | #define ASM_X86__FTRACE_H |
3 | 3 | ||
4 | #ifdef CONFIG_FTRACE | 4 | #ifdef CONFIG_FTRACE |
5 | #define MCOUNT_ADDR ((long)(mcount)) | 5 | #define MCOUNT_ADDR ((long)(mcount)) |
@@ -11,4 +11,4 @@ extern void mcount(void); | |||
11 | 11 | ||
12 | #endif /* CONFIG_FTRACE */ | 12 | #endif /* CONFIG_FTRACE */ |
13 | 13 | ||
14 | #endif /* _ASM_X86_FTRACE */ | 14 | #endif /* ASM_X86__FTRACE_H */ |
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index e7a76b37b333..45dc24d84186 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_FUTEX_H | 1 | #ifndef ASM_X86__FUTEX_H |
2 | #define _ASM_X86_FUTEX_H | 2 | #define ASM_X86__FUTEX_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
@@ -137,4 +137,4 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | |||
137 | } | 137 | } |
138 | 138 | ||
139 | #endif | 139 | #endif |
140 | #endif | 140 | #endif /* ASM_X86__FUTEX_H */ |
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h index 3f62a83887f3..07f445844146 100644 --- a/include/asm-x86/gart.h +++ b/include/asm-x86/gart.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X8664_GART_H | 1 | #ifndef ASM_X86__GART_H |
2 | #define _ASM_X8664_GART_H 1 | 2 | #define ASM_X86__GART_H |
3 | 3 | ||
4 | #include <asm/e820.h> | 4 | #include <asm/e820.h> |
5 | 5 | ||
@@ -68,4 +68,4 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) | |||
68 | return 1; | 68 | return 1; |
69 | } | 69 | } |
70 | 70 | ||
71 | #endif | 71 | #endif /* ASM_X86__GART_H */ |
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 754d635f90ff..34280f027664 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_GENAPIC_H | 1 | #ifndef ASM_X86__GENAPIC_32_H |
2 | #define _ASM_GENAPIC_H 1 | 2 | #define ASM_X86__GENAPIC_32_H |
3 | 3 | ||
4 | #include <asm/mpspec.h> | 4 | #include <asm/mpspec.h> |
5 | 5 | ||
@@ -121,4 +121,4 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | |||
121 | #define uv_system_init() do {} while (0) | 121 | #define uv_system_init() do {} while (0) |
122 | 122 | ||
123 | 123 | ||
124 | #endif | 124 | #endif /* ASM_X86__GENAPIC_32_H */ |
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index a47d63129135..ed6a4886c082 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_GENAPIC_H | 1 | #ifndef ASM_X86__GENAPIC_64_H |
2 | #define _ASM_GENAPIC_H 1 | 2 | #define ASM_X86__GENAPIC_64_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 2004 James Cleverdon, IBM. | 5 | * Copyright 2004 James Cleverdon, IBM. |
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | struct genapic { | 15 | struct genapic { |
16 | char *name; | 16 | char *name; |
17 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); | ||
17 | u32 int_delivery_mode; | 18 | u32 int_delivery_mode; |
18 | u32 int_dest_mode; | 19 | u32 int_dest_mode; |
19 | int (*apic_id_registered)(void); | 20 | int (*apic_id_registered)(void); |
@@ -24,17 +25,24 @@ struct genapic { | |||
24 | void (*send_IPI_mask)(cpumask_t mask, int vector); | 25 | void (*send_IPI_mask)(cpumask_t mask, int vector); |
25 | void (*send_IPI_allbutself)(int vector); | 26 | void (*send_IPI_allbutself)(int vector); |
26 | void (*send_IPI_all)(int vector); | 27 | void (*send_IPI_all)(int vector); |
28 | void (*send_IPI_self)(int vector); | ||
27 | /* */ | 29 | /* */ |
28 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); | 30 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); |
29 | unsigned int (*phys_pkg_id)(int index_msb); | 31 | unsigned int (*phys_pkg_id)(int index_msb); |
32 | unsigned int (*get_apic_id)(unsigned long x); | ||
33 | unsigned long (*set_apic_id)(unsigned int id); | ||
34 | unsigned long apic_id_mask; | ||
30 | }; | 35 | }; |
31 | 36 | ||
32 | extern struct genapic *genapic; | 37 | extern struct genapic *genapic; |
33 | 38 | ||
34 | extern struct genapic apic_flat; | 39 | extern struct genapic apic_flat; |
35 | extern struct genapic apic_physflat; | 40 | extern struct genapic apic_physflat; |
41 | extern struct genapic apic_x2apic_cluster; | ||
42 | extern struct genapic apic_x2apic_phys; | ||
36 | extern int acpi_madt_oem_check(char *, char *); | 43 | extern int acpi_madt_oem_check(char *, char *); |
37 | 44 | ||
45 | extern void apic_send_IPI_self(int vector); | ||
38 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | 46 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; |
39 | extern enum uv_system_type get_uv_system_type(void); | 47 | extern enum uv_system_type get_uv_system_type(void); |
40 | extern int is_uv_system(void); | 48 | extern int is_uv_system(void); |
@@ -47,4 +55,4 @@ extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); | |||
47 | 55 | ||
48 | extern void setup_apic_routing(void); | 56 | extern void setup_apic_routing(void); |
49 | 57 | ||
50 | #endif | 58 | #endif /* ASM_X86__GENAPIC_64_H */ |
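Note: a hedged sketch of how an xAPIC-mode genapic could fill in the three new ID-handling fields (get_apic_id, set_apic_id, apic_id_mask). The helper names are illustrative, and the 24-bit ID layout follows the es7000 apicdef.h added elsewhere in this series rather than this hunk.

static unsigned int xapic_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;	/* xAPIC ID lives in bits 31:24 */
}

static unsigned long xapic_set_apic_id(unsigned int id)
{
	return (unsigned long)(id & 0xFF) << 24;
}

/* in the struct genapic initializer:
 *	.get_apic_id	= xapic_get_apic_id,
 *	.set_apic_id	= xapic_set_apic_id,
 *	.apic_id_mask	= 0xFFUL << 24,
 * an x2apic driver would instead pass the full 32-bit ID straight through.
 */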
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 2c1cda0b8a86..3f3444be2638 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h | |||
@@ -7,8 +7,8 @@ | |||
7 | * as published by the Free Software Foundation. | 7 | * as published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef _ASM_GEODE_H_ | 10 | #ifndef ASM_X86__GEODE_H |
11 | #define _ASM_GEODE_H_ | 11 | #define ASM_X86__GEODE_H |
12 | 12 | ||
13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
@@ -250,4 +250,4 @@ extern int __init mfgpt_timer_setup(void); | |||
250 | static inline int mfgpt_timer_setup(void) { return 0; } | 250 | static inline int mfgpt_timer_setup(void) { return 0; } |
251 | #endif | 251 | #endif |
252 | 252 | ||
253 | #endif | 253 | #endif /* ASM_X86__GEODE_H */ |
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h index c4c91b37c104..497fb980d962 100644 --- a/include/asm-x86/gpio.h +++ b/include/asm-x86/gpio.h | |||
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq) | |||
53 | 53 | ||
54 | #endif /* CONFIG_GPIOLIB */ | 54 | #endif /* CONFIG_GPIOLIB */ |
55 | 55 | ||
56 | #endif /* _ASM_I386_GPIO_H */ | 56 | #endif /* ASM_X86__GPIO_H */ |
diff --git a/include/asm-x86/hardirq_32.h b/include/asm-x86/hardirq_32.h index 4f85f0f4b563..700fe230d919 100644 --- a/include/asm-x86/hardirq_32.h +++ b/include/asm-x86/hardirq_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_HARDIRQ_H | 1 | #ifndef ASM_X86__HARDIRQ_32_H |
2 | #define __ASM_HARDIRQ_H | 2 | #define ASM_X86__HARDIRQ_32_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
@@ -25,4 +25,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat); | |||
25 | void ack_bad_irq(unsigned int irq); | 25 | void ack_bad_irq(unsigned int irq); |
26 | #include <linux/irq_cpustat.h> | 26 | #include <linux/irq_cpustat.h> |
27 | 27 | ||
28 | #endif /* __ASM_HARDIRQ_H */ | 28 | #endif /* ASM_X86__HARDIRQ_32_H */ |
diff --git a/include/asm-x86/hardirq_64.h b/include/asm-x86/hardirq_64.h index 95d5e090ed89..f8bd2919a8ce 100644 --- a/include/asm-x86/hardirq_64.h +++ b/include/asm-x86/hardirq_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_HARDIRQ_H | 1 | #ifndef ASM_X86__HARDIRQ_64_H |
2 | #define __ASM_HARDIRQ_H | 2 | #define ASM_X86__HARDIRQ_64_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
@@ -20,4 +20,4 @@ | |||
20 | 20 | ||
21 | extern void ack_bad_irq(unsigned int irq); | 21 | extern void ack_bad_irq(unsigned int irq); |
22 | 22 | ||
23 | #endif /* __ASM_HARDIRQ_H */ | 23 | #endif /* ASM_X86__HARDIRQ_64_H */ |
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 4514b16cc723..bc3f6a280316 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h | |||
@@ -15,8 +15,8 @@ | |||
15 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | 15 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifndef _ASM_HIGHMEM_H | 18 | #ifndef ASM_X86__HIGHMEM_H |
19 | #define _ASM_HIGHMEM_H | 19 | #define ASM_X86__HIGHMEM_H |
20 | 20 | ||
21 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
22 | 22 | ||
@@ -79,4 +79,4 @@ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, | |||
79 | 79 | ||
80 | #endif /* __KERNEL__ */ | 80 | #endif /* __KERNEL__ */ |
81 | 81 | ||
82 | #endif /* _ASM_HIGHMEM_H */ | 82 | #endif /* ASM_X86__HIGHMEM_H */ |
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h index 82f1ac641bd7..cbbbb6d4dd32 100644 --- a/include/asm-x86/hpet.h +++ b/include/asm-x86/hpet.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86_HPET_H | 1 | #ifndef ASM_X86__HPET_H |
2 | #define ASM_X86_HPET_H | 2 | #define ASM_X86__HPET_H |
3 | 3 | ||
4 | #ifdef CONFIG_HPET_TIMER | 4 | #ifdef CONFIG_HPET_TIMER |
5 | 5 | ||
@@ -90,4 +90,4 @@ static inline int is_hpet_enabled(void) { return 0; } | |||
90 | #define hpet_readl(a) 0 | 90 | #define hpet_readl(a) 0 |
91 | 91 | ||
92 | #endif | 92 | #endif |
93 | #endif /* ASM_X86_HPET_H */ | 93 | #endif /* ASM_X86__HPET_H */ |
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h index 439a9acc132d..0b7ec5dc0884 100644 --- a/include/asm-x86/hugetlb.h +++ b/include/asm-x86/hugetlb.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_HUGETLB_H | 1 | #ifndef ASM_X86__HUGETLB_H |
2 | #define _ASM_X86_HUGETLB_H | 2 | #define ASM_X86__HUGETLB_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | 5 | ||
@@ -90,4 +90,4 @@ static inline void arch_release_hugepage(struct page *page) | |||
90 | { | 90 | { |
91 | } | 91 | } |
92 | 92 | ||
93 | #endif /* _ASM_X86_HUGETLB_H */ | 93 | #endif /* ASM_X86__HUGETLB_H */ |
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h index edd0b95f14d0..50f6e0316b50 100644 --- a/include/asm-x86/hw_irq.h +++ b/include/asm-x86/hw_irq.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_HW_IRQ_H | 1 | #ifndef ASM_X86__HW_IRQ_H |
2 | #define _ASM_HW_IRQ_H | 2 | #define ASM_X86__HW_IRQ_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | 5 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar |
@@ -64,7 +64,6 @@ extern unsigned long io_apic_irqs; | |||
64 | extern void init_VISWS_APIC_irqs(void); | 64 | extern void init_VISWS_APIC_irqs(void); |
65 | extern void setup_IO_APIC(void); | 65 | extern void setup_IO_APIC(void); |
66 | extern void disable_IO_APIC(void); | 66 | extern void disable_IO_APIC(void); |
67 | extern void print_IO_APIC(void); | ||
68 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | 67 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); |
69 | extern void setup_ioapic_dest(void); | 68 | extern void setup_ioapic_dest(void); |
70 | 69 | ||
@@ -73,7 +72,9 @@ extern void enable_IO_APIC(void); | |||
73 | #endif | 72 | #endif |
74 | 73 | ||
75 | /* IPI functions */ | 74 | /* IPI functions */ |
75 | #ifdef CONFIG_X86_32 | ||
76 | extern void send_IPI_self(int vector); | 76 | extern void send_IPI_self(int vector); |
77 | #endif | ||
77 | extern void send_IPI(int dest, int vector); | 78 | extern void send_IPI(int dest, int vector); |
78 | 79 | ||
79 | /* Statistics */ | 80 | /* Statistics */ |
@@ -93,6 +94,26 @@ extern asmlinkage void qic_reschedule_interrupt(void); | |||
93 | extern asmlinkage void qic_enable_irq_interrupt(void); | 94 | extern asmlinkage void qic_enable_irq_interrupt(void); |
94 | extern asmlinkage void qic_call_function_interrupt(void); | 95 | extern asmlinkage void qic_call_function_interrupt(void); |
95 | 96 | ||
97 | /* SMP */ | ||
98 | extern void smp_apic_timer_interrupt(struct pt_regs *); | ||
99 | #ifdef CONFIG_X86_32 | ||
100 | extern void smp_spurious_interrupt(struct pt_regs *); | ||
101 | extern void smp_error_interrupt(struct pt_regs *); | ||
102 | #else | ||
103 | extern asmlinkage void smp_spurious_interrupt(void); | ||
104 | extern asmlinkage void smp_error_interrupt(void); | ||
105 | #endif | ||
106 | #ifdef CONFIG_X86_SMP | ||
107 | extern void smp_reschedule_interrupt(struct pt_regs *); | ||
108 | extern void smp_call_function_interrupt(struct pt_regs *); | ||
109 | extern void smp_call_function_single_interrupt(struct pt_regs *); | ||
110 | #ifdef CONFIG_X86_32 | ||
111 | extern void smp_invalidate_interrupt(struct pt_regs *); | ||
112 | #else | ||
113 | extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *); | ||
114 | #endif | ||
115 | #endif | ||
116 | |||
96 | #ifdef CONFIG_X86_32 | 117 | #ifdef CONFIG_X86_32 |
97 | extern void (*const interrupt[NR_IRQS])(void); | 118 | extern void (*const interrupt[NR_IRQS])(void); |
98 | #else | 119 | #else |
@@ -112,4 +133,4 @@ static inline void __setup_vector_irq(int cpu) {} | |||
112 | 133 | ||
113 | #endif /* !ASSEMBLY_ */ | 134 | #endif /* !ASSEMBLY_ */ |
114 | 135 | ||
115 | #endif | 136 | #endif /* ASM_X86__HW_IRQ_H */ |
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h index d2bbd238b3e1..cc011a3bc1c2 100644 --- a/include/asm-x86/hypertransport.h +++ b/include/asm-x86/hypertransport.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_HYPERTRANSPORT_H | 1 | #ifndef ASM_X86__HYPERTRANSPORT_H |
2 | #define ASM_HYPERTRANSPORT_H | 2 | #define ASM_X86__HYPERTRANSPORT_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Constants for x86 Hypertransport Interrupts. | 5 | * Constants for x86 Hypertransport Interrupts. |
@@ -42,4 +42,4 @@ | |||
42 | #define HT_IRQ_HIGH_DEST_ID(v) \ | 42 | #define HT_IRQ_HIGH_DEST_ID(v) \ |
43 | ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) | 43 | ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) |
44 | 44 | ||
45 | #endif /* ASM_HYPERTRANSPORT_H */ | 45 | #endif /* ASM_X86__HYPERTRANSPORT_H */ |
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index 56d00e31aec0..9ba862a4eac0 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h | |||
@@ -7,8 +7,8 @@ | |||
7 | * x86-64 work by Andi Kleen 2002 | 7 | * x86-64 work by Andi Kleen 2002 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef _ASM_X86_I387_H | 10 | #ifndef ASM_X86__I387_H |
11 | #define _ASM_X86_I387_H | 11 | #define ASM_X86__I387_H |
12 | 12 | ||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
@@ -19,23 +19,32 @@ | |||
19 | #include <asm/sigcontext.h> | 19 | #include <asm/sigcontext.h> |
20 | #include <asm/user.h> | 20 | #include <asm/user.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/xsave.h> | ||
22 | 23 | ||
24 | extern unsigned int sig_xstate_size; | ||
23 | extern void fpu_init(void); | 25 | extern void fpu_init(void); |
24 | extern void mxcsr_feature_mask_init(void); | 26 | extern void mxcsr_feature_mask_init(void); |
25 | extern int init_fpu(struct task_struct *child); | 27 | extern int init_fpu(struct task_struct *child); |
26 | extern asmlinkage void math_state_restore(void); | 28 | extern asmlinkage void math_state_restore(void); |
27 | extern void init_thread_xstate(void); | 29 | extern void init_thread_xstate(void); |
30 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); | ||
28 | 31 | ||
29 | extern user_regset_active_fn fpregs_active, xfpregs_active; | 32 | extern user_regset_active_fn fpregs_active, xfpregs_active; |
30 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; | 33 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; |
31 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; | 34 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; |
32 | 35 | ||
36 | extern struct _fpx_sw_bytes fx_sw_reserved; | ||
33 | #ifdef CONFIG_IA32_EMULATION | 37 | #ifdef CONFIG_IA32_EMULATION |
38 | extern unsigned int sig_xstate_ia32_size; | ||
39 | extern struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
34 | struct _fpstate_ia32; | 40 | struct _fpstate_ia32; |
35 | extern int save_i387_ia32(struct _fpstate_ia32 __user *buf); | 41 | struct _xstate_ia32; |
36 | extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf); | 42 | extern int save_i387_xstate_ia32(void __user *buf); |
43 | extern int restore_i387_xstate_ia32(void __user *buf); | ||
37 | #endif | 44 | #endif |
38 | 45 | ||
46 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | ||
47 | |||
39 | #ifdef CONFIG_X86_64 | 48 | #ifdef CONFIG_X86_64 |
40 | 49 | ||
41 | /* Ignore delayed exceptions from user space */ | 50 | /* Ignore delayed exceptions from user space */ |
@@ -46,7 +55,7 @@ static inline void tolerant_fwait(void) | |||
46 | _ASM_EXTABLE(1b, 2b)); | 55 | _ASM_EXTABLE(1b, 2b)); |
47 | } | 56 | } |
48 | 57 | ||
49 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | 58 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) |
50 | { | 59 | { |
51 | int err; | 60 | int err; |
52 | 61 | ||
@@ -66,15 +75,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
66 | return err; | 75 | return err; |
67 | } | 76 | } |
68 | 77 | ||
69 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | 78 | static inline int restore_fpu_checking(struct task_struct *tsk) |
79 | { | ||
80 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
81 | return xrstor_checking(&tsk->thread.xstate->xsave); | ||
82 | else | ||
83 | return fxrstor_checking(&tsk->thread.xstate->fxsave); | ||
84 | } | ||
70 | 85 | ||
71 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | 86 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception |
72 | is pending. Clear the x87 state here by setting it to fixed | 87 | is pending. Clear the x87 state here by setting it to fixed |
73 | values. The kernel data segment can be sometimes 0 and sometimes | 88 | values. The kernel data segment can be sometimes 0 and sometimes |
74 | new user value. Both should be ok. | 89 | new user value. Both should be ok. |
75 | Use the PDA as safe address because it should be already in L1. */ | 90 | Use the PDA as safe address because it should be already in L1. */ |
76 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | 91 | static inline void clear_fpu_state(struct task_struct *tsk) |
77 | { | 92 | { |
93 | struct xsave_struct *xstate = &tsk->thread.xstate->xsave; | ||
94 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | ||
95 | |||
96 | /* | ||
97 | * xsave header may indicate the init state of the FP. | ||
98 | */ | ||
99 | if ((task_thread_info(tsk)->status & TS_XSAVE) && | ||
100 | !(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
101 | return; | ||
102 | |||
78 | if (unlikely(fx->swd & X87_FSW_ES)) | 103 | if (unlikely(fx->swd & X87_FSW_ES)) |
79 | asm volatile("fnclex"); | 104 | asm volatile("fnclex"); |
80 | alternative_input(ASM_NOP8 ASM_NOP2, | 105 | alternative_input(ASM_NOP8 ASM_NOP2, |
@@ -83,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | |||
83 | X86_FEATURE_FXSAVE_LEAK); | 108 | X86_FEATURE_FXSAVE_LEAK); |
84 | } | 109 | } |
85 | 110 | ||
86 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | 111 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) |
87 | { | 112 | { |
88 | int err; | 113 | int err; |
89 | 114 | ||
@@ -107,7 +132,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | |||
107 | return err; | 132 | return err; |
108 | } | 133 | } |
109 | 134 | ||
110 | static inline void __save_init_fpu(struct task_struct *tsk) | 135 | static inline void fxsave(struct task_struct *tsk) |
111 | { | 136 | { |
112 | /* Using "rex64; fxsave %0" is broken because, if the memory operand | 137 | /* Using "rex64; fxsave %0" is broken because, if the memory operand |
113 | uses any extended registers for addressing, a second REX prefix | 138 | uses any extended registers for addressing, a second REX prefix |
@@ -132,7 +157,16 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
132 | : "=m" (tsk->thread.xstate->fxsave) | 157 | : "=m" (tsk->thread.xstate->fxsave) |
133 | : "cdaSDb" (&tsk->thread.xstate->fxsave)); | 158 | : "cdaSDb" (&tsk->thread.xstate->fxsave)); |
134 | #endif | 159 | #endif |
135 | clear_fpu_state(&tsk->thread.xstate->fxsave); | 160 | } |
161 | |||
162 | static inline void __save_init_fpu(struct task_struct *tsk) | ||
163 | { | ||
164 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
165 | xsave(tsk); | ||
166 | else | ||
167 | fxsave(tsk); | ||
168 | |||
169 | clear_fpu_state(tsk); | ||
136 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 170 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
137 | } | 171 | } |
138 | 172 | ||
@@ -147,6 +181,10 @@ static inline void tolerant_fwait(void) | |||
147 | 181 | ||
148 | static inline void restore_fpu(struct task_struct *tsk) | 182 | static inline void restore_fpu(struct task_struct *tsk) |
149 | { | 183 | { |
184 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
185 | xrstor_checking(&tsk->thread.xstate->xsave); | ||
186 | return; | ||
187 | } | ||
150 | /* | 188 | /* |
151 | * The "nop" is needed to make the instructions the same | 189 | * The "nop" is needed to make the instructions the same |
152 | * length. | 190 | * length. |
@@ -172,6 +210,27 @@ static inline void restore_fpu(struct task_struct *tsk) | |||
172 | */ | 210 | */ |
173 | static inline void __save_init_fpu(struct task_struct *tsk) | 211 | static inline void __save_init_fpu(struct task_struct *tsk) |
174 | { | 212 | { |
213 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
214 | struct xsave_struct *xstate = &tsk->thread.xstate->xsave; | ||
215 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | ||
216 | |||
217 | xsave(tsk); | ||
218 | |||
219 | /* | ||
220 | * xsave header may indicate the init state of the FP. | ||
221 | */ | ||
222 | if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
223 | goto end; | ||
224 | |||
225 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
226 | asm volatile("fnclex"); | ||
227 | |||
228 | /* | ||
229 | * we can do a simple return here or be paranoid :) | ||
230 | */ | ||
231 | goto clear_state; | ||
232 | } | ||
233 | |||
175 | /* Use more nops than strictly needed in case the compiler | 234 | /* Use more nops than strictly needed in case the compiler |
176 | varies code */ | 235 | varies code */ |
177 | alternative_input( | 236 | alternative_input( |
@@ -181,6 +240,7 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
181 | X86_FEATURE_FXSR, | 240 | X86_FEATURE_FXSR, |
182 | [fx] "m" (tsk->thread.xstate->fxsave), | 241 | [fx] "m" (tsk->thread.xstate->fxsave), |
183 | [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); | 242 | [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); |
243 | clear_state: | ||
184 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 244 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
185 | is pending. Clear the x87 state here by setting it to fixed | 245 | is pending. Clear the x87 state here by setting it to fixed |
186 | values. safe_address is a random variable that should be in L1 */ | 246 | values. safe_address is a random variable that should be in L1 */ |
@@ -190,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
190 | "fildl %[addr]", /* set F?P to defined value */ | 250 | "fildl %[addr]", /* set F?P to defined value */ |
191 | X86_FEATURE_FXSAVE_LEAK, | 251 | X86_FEATURE_FXSAVE_LEAK, |
192 | [addr] "m" (safe_address)); | 252 | [addr] "m" (safe_address)); |
253 | end: | ||
193 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 254 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
194 | } | 255 | } |
195 | 256 | ||
257 | #endif /* CONFIG_X86_64 */ | ||
258 | |||
196 | /* | 259 | /* |
197 | * Signal frame handlers... | 260 | * Signal frame handlers... |
198 | */ | 261 | */ |
199 | extern int save_i387(struct _fpstate __user *buf); | 262 | extern int save_i387_xstate(void __user *buf); |
200 | extern int restore_i387(struct _fpstate __user *buf); | 263 | extern int restore_i387_xstate(void __user *buf); |
201 | |||
202 | #endif /* CONFIG_X86_64 */ | ||
203 | 264 | ||
204 | static inline void __unlazy_fpu(struct task_struct *tsk) | 265 | static inline void __unlazy_fpu(struct task_struct *tsk) |
205 | { | 266 | { |
@@ -336,4 +397,4 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) | |||
336 | } | 397 | } |
337 | } | 398 | } |
338 | 399 | ||
339 | #endif /* _ASM_X86_I387_H */ | 400 | #endif /* ASM_X86__I387_H */ |
diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h index b51c0487fc41..15a5b530044e 100644 --- a/include/asm-x86/i8253.h +++ b/include/asm-x86/i8253.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_I8253_H__ | 1 | #ifndef ASM_X86__I8253_H |
2 | #define __ASM_I8253_H__ | 2 | #define ASM_X86__I8253_H |
3 | 3 | ||
4 | /* i8253A PIT registers */ | 4 | /* i8253A PIT registers */ |
5 | #define PIT_MODE 0x43 | 5 | #define PIT_MODE 0x43 |
@@ -15,4 +15,4 @@ extern void setup_pit_timer(void); | |||
15 | #define inb_pit inb_p | 15 | #define inb_pit inb_p |
16 | #define outb_pit outb_p | 16 | #define outb_pit outb_p |
17 | 17 | ||
18 | #endif /* __ASM_I8253_H__ */ | 18 | #endif /* ASM_X86__I8253_H */ |
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 2f98df91f1f2..23c1b3baaecd 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_I8259_H__ | 1 | #ifndef ASM_X86__I8259_H |
2 | #define __ASM_I8259_H__ | 2 | #define ASM_X86__I8259_H |
3 | 3 | ||
4 | #include <linux/delay.h> | 4 | #include <linux/delay.h> |
5 | 5 | ||
@@ -57,4 +57,7 @@ static inline void outb_pic(unsigned char value, unsigned int port) | |||
57 | 57 | ||
58 | extern struct irq_chip i8259A_chip; | 58 | extern struct irq_chip i8259A_chip; |
59 | 59 | ||
60 | #endif /* __ASM_I8259_H__ */ | 60 | extern void mask_8259A(void); |
61 | extern void unmask_8259A(void); | ||
62 | |||
63 | #endif /* ASM_X86__I8259_H */ | ||
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index 55d3abe5276f..f932f7ad51dd 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_IA32_H | 1 | #ifndef ASM_X86__IA32_H |
2 | #define _ASM_X86_64_IA32_H | 2 | #define ASM_X86__IA32_H |
3 | 3 | ||
4 | 4 | ||
5 | #ifdef CONFIG_IA32_EMULATION | 5 | #ifdef CONFIG_IA32_EMULATION |
@@ -167,4 +167,4 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm); | |||
167 | 167 | ||
168 | #endif /* !CONFIG_IA32_SUPPORT */ | 168 | #endif /* !CONFIG_IA32_SUPPORT */ |
169 | 169 | ||
170 | #endif | 170 | #endif /* ASM_X86__IA32_H */ |
diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h index 61cea9e7c5c1..dbd887d8a5a5 100644 --- a/include/asm-x86/ia32_unistd.h +++ b/include/asm-x86/ia32_unistd.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_IA32_UNISTD_H_ | 1 | #ifndef ASM_X86__IA32_UNISTD_H |
2 | #define _ASM_X86_64_IA32_UNISTD_H_ | 2 | #define ASM_X86__IA32_UNISTD_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file contains the system call numbers of the ia32 port, | 5 | * This file contains the system call numbers of the ia32 port, |
@@ -15,4 +15,4 @@ | |||
15 | #define __NR_ia32_sigreturn 119 | 15 | #define __NR_ia32_sigreturn 119 |
16 | #define __NR_ia32_rt_sigreturn 173 | 16 | #define __NR_ia32_rt_sigreturn 173 |
17 | 17 | ||
18 | #endif /* _ASM_X86_64_IA32_UNISTD_H_ */ | 18 | #endif /* ASM_X86__IA32_UNISTD_H */ |
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h index d240e5b30a45..dc9c7944847b 100644 --- a/include/asm-x86/idle.h +++ b/include/asm-x86/idle.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_IDLE_H | 1 | #ifndef ASM_X86__IDLE_H |
2 | #define _ASM_X86_64_IDLE_H 1 | 2 | #define ASM_X86__IDLE_H |
3 | 3 | ||
4 | #define IDLE_START 1 | 4 | #define IDLE_START 1 |
5 | #define IDLE_END 2 | 5 | #define IDLE_END 2 |
@@ -10,4 +10,4 @@ void idle_notifier_register(struct notifier_block *n); | |||
10 | void enter_idle(void); | 10 | void enter_idle(void); |
11 | void exit_idle(void); | 11 | void exit_idle(void); |
12 | 12 | ||
13 | #endif | 13 | #endif /* ASM_X86__IDLE_H */ |
diff --git a/include/asm-x86/intel_arch_perfmon.h b/include/asm-x86/intel_arch_perfmon.h index fa0fd068bc2e..07c03c6c9a16 100644 --- a/include/asm-x86/intel_arch_perfmon.h +++ b/include/asm-x86/intel_arch_perfmon.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_INTEL_ARCH_PERFMON_H | 1 | #ifndef ASM_X86__INTEL_ARCH_PERFMON_H |
2 | #define _ASM_X86_INTEL_ARCH_PERFMON_H | 2 | #define ASM_X86__INTEL_ARCH_PERFMON_H |
3 | 3 | ||
4 | #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 | 4 | #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 |
5 | #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 | 5 | #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 |
@@ -28,4 +28,4 @@ union cpuid10_eax { | |||
28 | unsigned int full; | 28 | unsigned int full; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | #endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ | 31 | #endif /* ASM_X86__INTEL_ARCH_PERFMON_H */ |
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 0f954dc89cb3..72b7719523bf 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_IO_H | 1 | #ifndef ASM_X86__IO_H |
2 | #define _ASM_X86_IO_H | 2 | #define ASM_X86__IO_H |
3 | 3 | ||
4 | #define ARCH_HAS_IOREMAP_WC | 4 | #define ARCH_HAS_IOREMAP_WC |
5 | 5 | ||
@@ -73,6 +73,8 @@ build_mmio_write(__writeq, "q", unsigned long, "r", ) | |||
73 | #define writeq writeq | 73 | #define writeq writeq |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | extern int iommu_bio_merge; | ||
77 | |||
76 | #ifdef CONFIG_X86_32 | 78 | #ifdef CONFIG_X86_32 |
77 | # include "io_32.h" | 79 | # include "io_32.h" |
78 | #else | 80 | #else |
@@ -99,4 +101,4 @@ extern void early_iounmap(void *addr, unsigned long size); | |||
99 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); | 101 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); |
100 | 102 | ||
101 | 103 | ||
102 | #endif /* _ASM_X86_IO_H */ | 104 | #endif /* ASM_X86__IO_H */ |
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index e876d89ac156..4f7d878bda18 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_IO_H | 1 | #ifndef ASM_X86__IO_32_H |
2 | #define _ASM_IO_H | 2 | #define ASM_X86__IO_32_H |
3 | 3 | ||
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
@@ -281,4 +281,4 @@ BUILDIO(b, b, char) | |||
281 | BUILDIO(w, w, short) | 281 | BUILDIO(w, w, short) |
282 | BUILDIO(l, , int) | 282 | BUILDIO(l, , int) |
283 | 283 | ||
284 | #endif | 284 | #endif /* ASM_X86__IO_32_H */ |
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index 22995c5c5adc..64429e9431a8 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_IO_H | 1 | #ifndef ASM_X86__IO_64_H |
2 | #define _ASM_IO_H | 2 | #define ASM_X86__IO_64_H |
3 | 3 | ||
4 | 4 | ||
5 | /* | 5 | /* |
@@ -235,7 +235,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c); | |||
235 | 235 | ||
236 | #define flush_write_buffers() | 236 | #define flush_write_buffers() |
237 | 237 | ||
238 | extern int iommu_bio_merge; | ||
239 | #define BIO_VMERGE_BOUNDARY iommu_bio_merge | 238 | #define BIO_VMERGE_BOUNDARY iommu_bio_merge |
240 | 239 | ||
241 | /* | 240 | /* |
@@ -245,4 +244,4 @@ extern int iommu_bio_merge; | |||
245 | 244 | ||
246 | #endif /* __KERNEL__ */ | 245 | #endif /* __KERNEL__ */ |
247 | 246 | ||
248 | #endif | 247 | #endif /* ASM_X86__IO_64_H */ |
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 14f82bbcb5fd..8ec68a50cf10 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_IO_APIC_H | 1 | #ifndef ASM_X86__IO_APIC_H |
2 | #define __ASM_IO_APIC_H | 2 | #define ASM_X86__IO_APIC_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/mpspec.h> | 5 | #include <asm/mpspec.h> |
@@ -107,6 +107,20 @@ struct IO_APIC_route_entry { | |||
107 | 107 | ||
108 | } __attribute__ ((packed)); | 108 | } __attribute__ ((packed)); |
109 | 109 | ||
110 | struct IR_IO_APIC_route_entry { | ||
111 | __u64 vector : 8, | ||
112 | zero : 3, | ||
113 | index2 : 1, | ||
114 | delivery_status : 1, | ||
115 | polarity : 1, | ||
116 | irr : 1, | ||
117 | trigger : 1, | ||
118 | mask : 1, | ||
119 | reserved : 31, | ||
120 | format : 1, | ||
121 | index : 15; | ||
122 | } __attribute__ ((packed)); | ||
123 | |||
110 | #ifdef CONFIG_X86_IO_APIC | 124 | #ifdef CONFIG_X86_IO_APIC |
111 | 125 | ||
112 | /* | 126 | /* |
@@ -183,10 +197,16 @@ extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, | |||
183 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | 197 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); |
184 | extern void ioapic_init_mappings(void); | 198 | extern void ioapic_init_mappings(void); |
185 | 199 | ||
200 | #ifdef CONFIG_X86_64 | ||
201 | extern int save_mask_IO_APIC_setup(void); | ||
202 | extern void restore_IO_APIC_setup(void); | ||
203 | extern void reinit_intr_remapped_IO_APIC(int); | ||
204 | #endif | ||
205 | |||
186 | #else /* !CONFIG_X86_IO_APIC */ | 206 | #else /* !CONFIG_X86_IO_APIC */ |
187 | #define io_apic_assign_pci_irqs 0 | 207 | #define io_apic_assign_pci_irqs 0 |
188 | static const int timer_through_8259 = 0; | 208 | static const int timer_through_8259 = 0; |
189 | static inline void ioapic_init_mappings(void) { } | 209 | static inline void ioapic_init_mappings(void) { } |
190 | #endif | 210 | #endif |
191 | 211 | ||
192 | #endif | 212 | #endif /* ASM_X86__IO_APIC_H */ |
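Note: the IR_IO_APIC_route_entry layout added above can be checked in isolation; a minimal user-space sketch (kernel __u64 replaced by unsigned long long) confirming the bitfields pack into the 64-bit route-entry format.

#include <stdio.h>

struct IR_IO_APIC_route_entry {
	unsigned long long vector	: 8,
			zero		: 3,
			index2		: 1,
			delivery_status	: 1,
			polarity	: 1,
			irr		: 1,
			trigger		: 1,
			mask		: 1,
			reserved	: 31,
			format		: 1,
			index		: 15;
} __attribute__ ((packed));

int main(void)
{
	/* 8+3+1+1+1+1+1+1+31+1+15 = 64 bits, i.e. one 8-byte route entry */
	printf("%zu\n", sizeof(struct IR_IO_APIC_route_entry));
	return 0;
}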
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h index c0c338bd4068..336603512399 100644 --- a/include/asm-x86/ioctls.h +++ b/include/asm-x86/ioctls.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_IOCTLS_H | 1 | #ifndef ASM_X86__IOCTLS_H |
2 | #define _ASM_X86_IOCTLS_H | 2 | #define ASM_X86__IOCTLS_H |
3 | 3 | ||
4 | #include <asm/ioctl.h> | 4 | #include <asm/ioctl.h> |
5 | 5 | ||
@@ -85,4 +85,4 @@ | |||
85 | 85 | ||
86 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | 86 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ |
87 | 87 | ||
88 | #endif | 88 | #endif /* ASM_X86__IOCTLS_H */ |
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h index 5f888cc5be49..e86f44148c66 100644 --- a/include/asm-x86/iommu.h +++ b/include/asm-x86/iommu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X8664_IOMMU_H | 1 | #ifndef ASM_X86__IOMMU_H |
2 | #define _ASM_X8664_IOMMU_H 1 | 2 | #define ASM_X86__IOMMU_H |
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | 4 | extern void pci_iommu_shutdown(void); |
5 | extern void no_iommu_init(void); | 5 | extern void no_iommu_init(void); |
@@ -42,4 +42,4 @@ static inline void gart_iommu_hole_init(void) | |||
42 | } | 42 | } |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #endif | 45 | #endif /* ASM_X86__IOMMU_H */ |
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h index ee678fd51594..910304fbdc8f 100644 --- a/include/asm-x86/ipcbuf.h +++ b/include/asm-x86/ipcbuf.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_IPCBUF_H | 1 | #ifndef ASM_X86__IPCBUF_H |
2 | #define _ASM_X86_IPCBUF_H | 2 | #define ASM_X86__IPCBUF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The ipc64_perm structure for x86 architecture. | 5 | * The ipc64_perm structure for x86 architecture. |
@@ -25,4 +25,4 @@ struct ipc64_perm { | |||
25 | unsigned long __unused2; | 25 | unsigned long __unused2; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | #endif /* _ASM_X86_IPCBUF_H */ | 28 | #endif /* ASM_X86__IPCBUF_H */ |
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index bb1c09f7a76c..30a692cfaff8 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_IPI_H | 1 | #ifndef ASM_X86__IPI_H |
2 | #define __ASM_IPI_H | 2 | #define ASM_X86__IPI_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 2004 James Cleverdon, IBM. | 5 | * Copyright 2004 James Cleverdon, IBM. |
@@ -49,6 +49,12 @@ static inline int __prepare_ICR2(unsigned int mask) | |||
49 | return SET_APIC_DEST_FIELD(mask); | 49 | return SET_APIC_DEST_FIELD(mask); |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline void __xapic_wait_icr_idle(void) | ||
53 | { | ||
54 | while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) | ||
55 | cpu_relax(); | ||
56 | } | ||
57 | |||
52 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, | 58 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, |
53 | unsigned int dest) | 59 | unsigned int dest) |
54 | { | 60 | { |
@@ -64,7 +70,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, | |||
64 | /* | 70 | /* |
65 | * Wait for idle. | 71 | * Wait for idle. |
66 | */ | 72 | */ |
67 | apic_wait_icr_idle(); | 73 | __xapic_wait_icr_idle(); |
68 | 74 | ||
69 | /* | 75 | /* |
70 | * No need to touch the target chip field | 76 | * No need to touch the target chip field |
@@ -74,7 +80,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, | |||
74 | /* | 80 | /* |
75 | * Send the IPI. The write to APIC_ICR fires this off. | 81 | * Send the IPI. The write to APIC_ICR fires this off. |
76 | */ | 82 | */ |
77 | apic_write(APIC_ICR, cfg); | 83 | native_apic_mem_write(APIC_ICR, cfg); |
78 | } | 84 | } |
79 | 85 | ||
80 | /* | 86 | /* |
@@ -92,13 +98,13 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, | |||
92 | if (unlikely(vector == NMI_VECTOR)) | 98 | if (unlikely(vector == NMI_VECTOR)) |
93 | safe_apic_wait_icr_idle(); | 99 | safe_apic_wait_icr_idle(); |
94 | else | 100 | else |
95 | apic_wait_icr_idle(); | 101 | __xapic_wait_icr_idle(); |
96 | 102 | ||
97 | /* | 103 | /* |
98 | * prepare target chip field | 104 | * prepare target chip field |
99 | */ | 105 | */ |
100 | cfg = __prepare_ICR2(mask); | 106 | cfg = __prepare_ICR2(mask); |
101 | apic_write(APIC_ICR2, cfg); | 107 | native_apic_mem_write(APIC_ICR2, cfg); |
102 | 108 | ||
103 | /* | 109 | /* |
104 | * program the ICR | 110 | * program the ICR |
@@ -108,7 +114,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, | |||
108 | /* | 114 | /* |
109 | * Send the IPI. The write to APIC_ICR fires this off. | 115 | * Send the IPI. The write to APIC_ICR fires this off. |
110 | */ | 116 | */ |
111 | apic_write(APIC_ICR, cfg); | 117 | native_apic_mem_write(APIC_ICR, cfg); |
112 | } | 118 | } |
113 | 119 | ||
114 | static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | 120 | static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) |
@@ -129,4 +135,4 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
129 | local_irq_restore(flags); | 135 | local_irq_restore(flags); |
130 | } | 136 | } |
131 | 137 | ||
132 | #endif /* __ASM_IPI_H */ | 138 | #endif /* ASM_X86__IPI_H */ |
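Note on the ipi.h hunk above: besides the guard rename, the xAPIC IPI helpers stop going through the generic apic_wait_icr_idle()/apic_write() accessors and instead call __xapic_wait_icr_idle() and native_apic_mem_write(), so this legacy path always uses the memory-mapped ICR even once the generic accessors are redirected for x2APIC. A hedged sketch of what the rewritten send path boils down to (the destination 0x01 and vector 0xfd are illustrative values only):

	/* Minimal sketch of __send_IPI_dest_field() after this change. */
	static inline void example_send_fixed_ipi(void)
	{
		unsigned int cfg;

		__xapic_wait_icr_idle();                  /* poll the MMIO ICR busy bit */
		native_apic_mem_write(APIC_ICR2, __prepare_ICR2(0x01));
		cfg = __prepare_ICR(0, 0xfd, APIC_DEST_PHYSICAL);
		native_apic_mem_write(APIC_ICR, cfg);     /* this write fires the IPI */
	}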
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h index 1a2925757317..1e5f2909c1db 100644 --- a/include/asm-x86/irq.h +++ b/include/asm-x86/irq.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_IRQ_H | 1 | #ifndef ASM_X86__IRQ_H |
2 | #define _ASM_IRQ_H | 2 | #define ASM_X86__IRQ_H |
3 | /* | 3 | /* |
4 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | 4 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar |
5 | * | 5 | * |
@@ -47,4 +47,4 @@ extern void native_init_IRQ(void); | |||
47 | /* Interrupt vector management */ | 47 | /* Interrupt vector management */ |
48 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); | 48 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); |
49 | 49 | ||
50 | #endif /* _ASM_IRQ_H */ | 50 | #endif /* ASM_X86__IRQ_H */ |
diff --git a/include/asm-x86/irq_regs_32.h b/include/asm-x86/irq_regs_32.h index 3368b20c0b48..316a3b258871 100644 --- a/include/asm-x86/irq_regs_32.h +++ b/include/asm-x86/irq_regs_32.h | |||
@@ -4,8 +4,8 @@ | |||
4 | * | 4 | * |
5 | * Jeremy Fitzhardinge <jeremy@goop.org> | 5 | * Jeremy Fitzhardinge <jeremy@goop.org> |
6 | */ | 6 | */ |
7 | #ifndef _ASM_I386_IRQ_REGS_H | 7 | #ifndef ASM_X86__IRQ_REGS_32_H |
8 | #define _ASM_I386_IRQ_REGS_H | 8 | #define ASM_X86__IRQ_REGS_32_H |
9 | 9 | ||
10 | #include <asm/percpu.h> | 10 | #include <asm/percpu.h> |
11 | 11 | ||
@@ -26,4 +26,4 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) | |||
26 | return old_regs; | 26 | return old_regs; |
27 | } | 27 | } |
28 | 28 | ||
29 | #endif /* _ASM_I386_IRQ_REGS_H */ | 29 | #endif /* ASM_X86__IRQ_REGS_32_H */ |
diff --git a/include/asm-x86/irq_remapping.h b/include/asm-x86/irq_remapping.h new file mode 100644 index 000000000000..78242c6ffa58 --- /dev/null +++ b/include/asm-x86/irq_remapping.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _ASM_IRQ_REMAPPING_H | ||
2 | #define _ASM_IRQ_REMAPPING_H | ||
3 | |||
4 | extern int x2apic; | ||
5 | |||
6 | #define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8) | ||
7 | |||
8 | #endif | ||
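The new irq_remapping.h above exists because the destination field of an interrupt-remapping table entry is laid out differently in the two APIC modes: with x2APIC enabled it carries the full 32-bit APIC ID, while in xAPIC mode the 8-bit ID sits in bits 15:8, hence the conditional shift in IRTE_DEST(). A hedged sketch of the intended use (the irte structure and its dest_id field name are assumptions for illustration; only the shift behaviour comes from the header above):

	static void example_set_irte_dest(struct irte *irte, unsigned int apicid)
	{
		/* xAPIC: apicid << 8 lands in bits 15:8; x2APIC: ID stored as-is. */
		irte->dest_id = IRTE_DEST(apicid);
	}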
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h index a48c7f2dbdc0..c5d2d767a1f3 100644 --- a/include/asm-x86/irq_vectors.h +++ b/include/asm-x86/irq_vectors.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_H | 1 | #ifndef ASM_X86__IRQ_VECTORS_H |
2 | #define _ASM_IRQ_VECTORS_H | 2 | #define ASM_X86__IRQ_VECTORS_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | 5 | ||
@@ -179,4 +179,4 @@ | |||
179 | #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) | 179 | #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) |
180 | 180 | ||
181 | 181 | ||
182 | #endif /* _ASM_IRQ_VECTORS_H */ | 182 | #endif /* ASM_X86__IRQ_VECTORS_H */ |
diff --git a/include/asm-x86/ist.h b/include/asm-x86/ist.h index 6ec6ceed95a7..35a2fe9bc921 100644 --- a/include/asm-x86/ist.h +++ b/include/asm-x86/ist.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_IST_H | 1 | #ifndef ASM_X86__IST_H |
2 | #define _ASM_IST_H | 2 | #define ASM_X86__IST_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Include file for the interface to IST BIOS | 5 | * Include file for the interface to IST BIOS |
@@ -31,4 +31,4 @@ struct ist_info { | |||
31 | extern struct ist_info ist_info; | 31 | extern struct ist_info ist_info; |
32 | 32 | ||
33 | #endif /* __KERNEL__ */ | 33 | #endif /* __KERNEL__ */ |
34 | #endif /* _ASM_IST_H */ | 34 | #endif /* ASM_X86__IST_H */ |
diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h index 452e2b696ff4..2bbaf4370a55 100644 --- a/include/asm-x86/k8.h +++ b/include/asm-x86/k8.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_K8_H | 1 | #ifndef ASM_X86__K8_H |
2 | #define _ASM_K8_H 1 | 2 | #define ASM_X86__K8_H |
3 | 3 | ||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | 5 | ||
@@ -12,4 +12,4 @@ extern int cache_k8_northbridges(void); | |||
12 | extern void k8_flush_garts(void); | 12 | extern void k8_flush_garts(void); |
13 | extern int k8_scan_nodes(unsigned long start, unsigned long end); | 13 | extern int k8_scan_nodes(unsigned long start, unsigned long end); |
14 | 14 | ||
15 | #endif | 15 | #endif /* ASM_X86__K8_H */ |
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index 96651bb59ba1..5ec3ad3e825c 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_KDEBUG_H | 1 | #ifndef ASM_X86__KDEBUG_H |
2 | #define _ASM_X86_KDEBUG_H | 2 | #define ASM_X86__KDEBUG_H |
3 | 3 | ||
4 | #include <linux/notifier.h> | 4 | #include <linux/notifier.h> |
5 | 5 | ||
@@ -35,4 +35,4 @@ extern void show_regs(struct pt_regs *regs); | |||
35 | extern unsigned long oops_begin(void); | 35 | extern unsigned long oops_begin(void); |
36 | extern void oops_end(unsigned long, struct pt_regs *, int signr); | 36 | extern void oops_end(unsigned long, struct pt_regs *, int signr); |
37 | 37 | ||
38 | #endif | 38 | #endif /* ASM_X86__KDEBUG_H */ |
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index 4246ab7dc988..ea09600d6129 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _KEXEC_H | 1 | #ifndef ASM_X86__KEXEC_H |
2 | #define _KEXEC_H | 2 | #define ASM_X86__KEXEC_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | # define PA_CONTROL_PAGE 0 | 5 | # define PA_CONTROL_PAGE 0 |
@@ -172,4 +172,4 @@ relocate_kernel(unsigned long indirection_page, | |||
172 | 172 | ||
173 | #endif /* __ASSEMBLY__ */ | 173 | #endif /* __ASSEMBLY__ */ |
174 | 174 | ||
175 | #endif /* _KEXEC_H */ | 175 | #endif /* ASM_X86__KEXEC_H */ |
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h index 484c47554f3b..83a7ee228ab1 100644 --- a/include/asm-x86/kgdb.h +++ b/include/asm-x86/kgdb.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_KGDB_H_ | 1 | #ifndef ASM_X86__KGDB_H |
2 | #define _ASM_KGDB_H_ | 2 | #define ASM_X86__KGDB_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright (C) 2001-2004 Amit S. Kale | 5 | * Copyright (C) 2001-2004 Amit S. Kale |
@@ -78,4 +78,4 @@ static inline void arch_kgdb_breakpoint(void) | |||
78 | #define BREAK_INSTR_SIZE 1 | 78 | #define BREAK_INSTR_SIZE 1 |
79 | #define CACHE_FLUSH_IS_SAFE 1 | 79 | #define CACHE_FLUSH_IS_SAFE 1 |
80 | 80 | ||
81 | #endif /* _ASM_KGDB_H_ */ | 81 | #endif /* ASM_X86__KGDB_H */ |
diff --git a/include/asm-x86/kmap_types.h b/include/asm-x86/kmap_types.h index 5f4174132a22..89f44493e643 100644 --- a/include/asm-x86/kmap_types.h +++ b/include/asm-x86/kmap_types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_KMAP_TYPES_H | 1 | #ifndef ASM_X86__KMAP_TYPES_H |
2 | #define _ASM_X86_KMAP_TYPES_H | 2 | #define ASM_X86__KMAP_TYPES_H |
3 | 3 | ||
4 | #if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) | 4 | #if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) |
5 | # define D(n) __KM_FENCE_##n , | 5 | # define D(n) __KM_FENCE_##n , |
@@ -26,4 +26,4 @@ D(13) KM_TYPE_NR | |||
26 | 26 | ||
27 | #undef D | 27 | #undef D |
28 | 28 | ||
29 | #endif | 29 | #endif /* ASM_X86__KMAP_TYPES_H */ |
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index 54980b0b3892..bd8407863c13 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_KPROBES_H | 1 | #ifndef ASM_X86__KPROBES_H |
2 | #define _ASM_KPROBES_H | 2 | #define ASM_X86__KPROBES_H |
3 | /* | 3 | /* |
4 | * Kernel Probes (KProbes) | 4 | * Kernel Probes (KProbes) |
5 | * | 5 | * |
@@ -94,4 +94,4 @@ static inline void restore_interrupts(struct pt_regs *regs) | |||
94 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | 94 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); |
95 | extern int kprobe_exceptions_notify(struct notifier_block *self, | 95 | extern int kprobe_exceptions_notify(struct notifier_block *self, |
96 | unsigned long val, void *data); | 96 | unsigned long val, void *data); |
97 | #endif /* _ASM_KPROBES_H */ | 97 | #endif /* ASM_X86__KPROBES_H */ |
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h index 6f1840812e59..78e954db1e7f 100644 --- a/include/asm-x86/kvm.h +++ b/include/asm-x86/kvm.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __LINUX_KVM_X86_H | 1 | #ifndef ASM_X86__KVM_H |
2 | #define __LINUX_KVM_X86_H | 2 | #define ASM_X86__KVM_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * KVM x86 specific structures and definitions | 5 | * KVM x86 specific structures and definitions |
@@ -230,4 +230,4 @@ struct kvm_pit_state { | |||
230 | #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) | 230 | #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) |
231 | #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) | 231 | #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) |
232 | 232 | ||
233 | #endif | 233 | #endif /* ASM_X86__KVM_H */ |
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index c2e34c275900..69794547f514 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h | |||
@@ -1,4 +1,4 @@ | |||
1 | #/* | 1 | /* |
2 | * Kernel-based Virtual Machine driver for Linux | 2 | * Kernel-based Virtual Machine driver for Linux |
3 | * | 3 | * |
4 | * This header defines architecture specific interfaces, x86 version | 4 | * This header defines architecture specific interfaces, x86 version |
@@ -8,8 +8,8 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef ASM_KVM_HOST_H | 11 | #ifndef ASM_X86__KVM_HOST_H |
12 | #define ASM_KVM_HOST_H | 12 | #define ASM_X86__KVM_HOST_H |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
@@ -735,4 +735,4 @@ asmlinkage void kvm_handle_fault_on_reboot(void); | |||
735 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 735 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
736 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); | 736 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); |
737 | 737 | ||
738 | #endif | 738 | #endif /* ASM_X86__KVM_HOST_H */ |
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h index 76f392146daa..30054fded4fb 100644 --- a/include/asm-x86/kvm_para.h +++ b/include/asm-x86/kvm_para.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __X86_KVM_PARA_H | 1 | #ifndef ASM_X86__KVM_PARA_H |
2 | #define __X86_KVM_PARA_H | 2 | #define ASM_X86__KVM_PARA_H |
3 | 3 | ||
4 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It | 4 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It |
5 | * should be used to determine that a VM is running under KVM. | 5 | * should be used to determine that a VM is running under KVM. |
@@ -144,4 +144,4 @@ static inline unsigned int kvm_arch_para_features(void) | |||
144 | 144 | ||
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | #endif | 147 | #endif /* ASM_X86__KVM_PARA_H */ |
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h index 4e8c1e48d91d..e2d9b030c1ac 100644 --- a/include/asm-x86/kvm_x86_emulate.h +++ b/include/asm-x86/kvm_x86_emulate.h | |||
@@ -8,8 +8,8 @@ | |||
8 | * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 | 8 | * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __X86_EMULATE_H__ | 11 | #ifndef ASM_X86__KVM_X86_EMULATE_H |
12 | #define __X86_EMULATE_H__ | 12 | #define ASM_X86__KVM_X86_EMULATE_H |
13 | 13 | ||
14 | struct x86_emulate_ctxt; | 14 | struct x86_emulate_ctxt; |
15 | 15 | ||
@@ -181,4 +181,4 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, | |||
181 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, | 181 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, |
182 | struct x86_emulate_ops *ops); | 182 | struct x86_emulate_ops *ops); |
183 | 183 | ||
184 | #endif /* __X86_EMULATE_H__ */ | 184 | #endif /* ASM_X86__KVM_X86_EMULATE_H */ |
diff --git a/include/asm-x86/ldt.h b/include/asm-x86/ldt.h index 20c597242b53..a5228504d867 100644 --- a/include/asm-x86/ldt.h +++ b/include/asm-x86/ldt.h | |||
@@ -3,8 +3,8 @@ | |||
3 | * | 3 | * |
4 | * Definitions of structures used with the modify_ldt system call. | 4 | * Definitions of structures used with the modify_ldt system call. |
5 | */ | 5 | */ |
6 | #ifndef _ASM_X86_LDT_H | 6 | #ifndef ASM_X86__LDT_H |
7 | #define _ASM_X86_LDT_H | 7 | #define ASM_X86__LDT_H |
8 | 8 | ||
9 | /* Maximum number of LDT entries supported. */ | 9 | /* Maximum number of LDT entries supported. */ |
10 | #define LDT_ENTRIES 8192 | 10 | #define LDT_ENTRIES 8192 |
@@ -37,4 +37,4 @@ struct user_desc { | |||
37 | #define MODIFY_LDT_CONTENTS_CODE 2 | 37 | #define MODIFY_LDT_CONTENTS_CODE 2 |
38 | 38 | ||
39 | #endif /* !__ASSEMBLY__ */ | 39 | #endif /* !__ASSEMBLY__ */ |
40 | #endif | 40 | #endif /* ASM_X86__LDT_H */ |
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index be4a7247fa2b..7505e947ed27 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_LGUEST_H | 1 | #ifndef ASM_X86__LGUEST_H |
2 | #define _X86_LGUEST_H | 2 | #define ASM_X86__LGUEST_H |
3 | 3 | ||
4 | #define GDT_ENTRY_LGUEST_CS 10 | 4 | #define GDT_ENTRY_LGUEST_CS 10 |
5 | #define GDT_ENTRY_LGUEST_DS 11 | 5 | #define GDT_ENTRY_LGUEST_DS 11 |
@@ -91,4 +91,4 @@ static inline void lguest_set_ts(void) | |||
91 | 91 | ||
92 | #endif /* __ASSEMBLY__ */ | 92 | #endif /* __ASSEMBLY__ */ |
93 | 93 | ||
94 | #endif | 94 | #endif /* ASM_X86__LGUEST_H */ |
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index a3241f28e34a..8f034ba4b53e 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Architecture specific portion of the lguest hypercalls */ | 1 | /* Architecture specific portion of the lguest hypercalls */ |
2 | #ifndef _X86_LGUEST_HCALL_H | 2 | #ifndef ASM_X86__LGUEST_HCALL_H |
3 | #define _X86_LGUEST_HCALL_H | 3 | #define ASM_X86__LGUEST_HCALL_H |
4 | 4 | ||
5 | #define LHCALL_FLUSH_ASYNC 0 | 5 | #define LHCALL_FLUSH_ASYNC 0 |
6 | #define LHCALL_LGUEST_INIT 1 | 6 | #define LHCALL_LGUEST_INIT 1 |
@@ -68,4 +68,4 @@ struct hcall_args { | |||
68 | }; | 68 | }; |
69 | 69 | ||
70 | #endif /* !__ASSEMBLY__ */ | 70 | #endif /* !__ASSEMBLY__ */ |
71 | #endif /* _I386_LGUEST_HCALL_H */ | 71 | #endif /* ASM_X86__LGUEST_HCALL_H */ |
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index 64e444f8e85b..42d8b62ee8ab 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | 1 | #ifndef ASM_X86__LINKAGE_H |
2 | #define __ASM_LINKAGE_H | 2 | #define ASM_X86__LINKAGE_H |
3 | 3 | ||
4 | #undef notrace | 4 | #undef notrace |
5 | #define notrace __attribute__((no_instrument_function)) | 5 | #define notrace __attribute__((no_instrument_function)) |
@@ -57,5 +57,5 @@ | |||
57 | #define __ALIGN_STR ".align 16,0x90" | 57 | #define __ALIGN_STR ".align 16,0x90" |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | #endif | 60 | #endif /* ASM_X86__LINKAGE_H */ |
61 | 61 | ||
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index 330a72496abd..ae91994fd6c9 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ARCH_LOCAL_H | 1 | #ifndef ASM_X86__LOCAL_H |
2 | #define _ARCH_LOCAL_H | 2 | #define ASM_X86__LOCAL_H |
3 | 3 | ||
4 | #include <linux/percpu.h> | 4 | #include <linux/percpu.h> |
5 | 5 | ||
@@ -232,4 +232,4 @@ static inline long local_sub_return(long i, local_t *l) | |||
232 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | 232 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) |
233 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | 233 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) |
234 | 234 | ||
235 | #endif /* _ARCH_LOCAL_H */ | 235 | #endif /* ASM_X86__LOCAL_H */ |
diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h index 989f34c37d32..2aa61b54fbd5 100644 --- a/include/asm-x86/mach-default/apm.h +++ b/include/asm-x86/mach-default/apm.h | |||
@@ -3,8 +3,8 @@ | |||
3 | * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> | 3 | * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef _ASM_APM_H | 6 | #ifndef ASM_X86__MACH_DEFAULT__APM_H |
7 | #define _ASM_APM_H | 7 | #define ASM_X86__MACH_DEFAULT__APM_H |
8 | 8 | ||
9 | #ifdef APM_ZERO_SEGS | 9 | #ifdef APM_ZERO_SEGS |
10 | # define APM_DO_ZERO_SEGS \ | 10 | # define APM_DO_ZERO_SEGS \ |
@@ -70,4 +70,4 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
70 | return error; | 70 | return error; |
71 | } | 71 | } |
72 | 72 | ||
73 | #endif /* _ASM_APM_H */ | 73 | #endif /* ASM_X86__MACH_DEFAULT__APM_H */ |
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index f3226b9a6b82..2a330a41b3dd 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef ASM_X86__MACH_DEFAULT__MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define ASM_X86__MACH_DEFAULT__MACH_APIC_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_LOCAL_APIC | 4 | #ifdef CONFIG_X86_LOCAL_APIC |
5 | 5 | ||
@@ -30,6 +30,8 @@ static inline cpumask_t target_cpus(void) | |||
30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | 30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) |
31 | #define phys_pkg_id (genapic->phys_pkg_id) | 31 | #define phys_pkg_id (genapic->phys_pkg_id) |
32 | #define vector_allocation_domain (genapic->vector_allocation_domain) | 32 | #define vector_allocation_domain (genapic->vector_allocation_domain) |
33 | #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) | ||
34 | #define send_IPI_self (genapic->send_IPI_self) | ||
33 | extern void setup_apic_routing(void); | 35 | extern void setup_apic_routing(void); |
34 | #else | 36 | #else |
35 | #define INT_DELIVERY_MODE dest_LowestPrio | 37 | #define INT_DELIVERY_MODE dest_LowestPrio |
@@ -54,7 +56,7 @@ static inline void init_apic_ldr(void) | |||
54 | 56 | ||
55 | static inline int apic_id_registered(void) | 57 | static inline int apic_id_registered(void) |
56 | { | 58 | { |
57 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); | 59 | return physid_isset(read_apic_id(), phys_cpu_present_map); |
58 | } | 60 | } |
59 | 61 | ||
60 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 62 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) |
@@ -138,4 +140,4 @@ static inline void enable_apic_mode(void) | |||
138 | } | 140 | } |
139 | 141 | ||
140 | #endif /* CONFIG_X86_LOCAL_APIC */ | 142 | #endif /* CONFIG_X86_LOCAL_APIC */ |
141 | #endif /* __ASM_MACH_APIC_H */ | 143 | #endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */ |
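In the mach_apic.h hunk above, read_apic_id() is introduced as an accessor that already applies GET_APIC_ID() to the raw APIC_ID register, which is why apic_id_registered() drops its own GET_APIC_ID() wrapper; the following mach_apicdef.h hunk routes GET_APIC_ID through genapic on 64-bit, so the extraction picks up the x2APIC variant automatically. A trivial sketch of the intended call pattern:

	/* Hedged sketch: the accessor already extracts the ID field. */
	static inline unsigned int example_this_cpu_apicid(void)
	{
		return read_apic_id();   /* == GET_APIC_ID(apic_read(APIC_ID)) */
	}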
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index e4b29ba37de6..0c2d41c41b20 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h | |||
@@ -1,12 +1,12 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | 1 | #ifndef ASM_X86__MACH_DEFAULT__MACH_APICDEF_H |
2 | #define __ASM_MACH_APICDEF_H | 2 | #define ASM_X86__MACH_DEFAULT__MACH_APICDEF_H |
3 | 3 | ||
4 | #include <asm/apic.h> | 4 | #include <asm/apic.h> |
5 | 5 | ||
6 | #ifdef CONFIG_X86_64 | 6 | #ifdef CONFIG_X86_64 |
7 | #define APIC_ID_MASK (0xFFu<<24) | 7 | #define APIC_ID_MASK (genapic->apic_id_mask) |
8 | #define GET_APIC_ID(x) (((x)>>24)&0xFFu) | 8 | #define GET_APIC_ID(x) (genapic->get_apic_id(x)) |
9 | #define SET_APIC_ID(x) (((x)<<24)) | 9 | #define SET_APIC_ID(x) (genapic->set_apic_id(x)) |
10 | #else | 10 | #else |
11 | #define APIC_ID_MASK (0xF<<24) | 11 | #define APIC_ID_MASK (0xF<<24) |
12 | static inline unsigned get_apic_id(unsigned long x) | 12 | static inline unsigned get_apic_id(unsigned long x) |
@@ -21,4 +21,4 @@ static inline unsigned get_apic_id(unsigned long x) | |||
21 | #define GET_APIC_ID(x) get_apic_id(x) | 21 | #define GET_APIC_ID(x) get_apic_id(x) |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | #endif | 24 | #endif /* ASM_X86__MACH_DEFAULT__MACH_APICDEF_H */ |
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h index be323364e68f..674bc7e50c35 100644 --- a/include/asm-x86/mach-default/mach_ipi.h +++ b/include/asm-x86/mach-default/mach_ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | 1 | #ifndef ASM_X86__MACH_DEFAULT__MACH_IPI_H |
2 | #define __ASM_MACH_IPI_H | 2 | #define ASM_X86__MACH_DEFAULT__MACH_IPI_H |
3 | 3 | ||
4 | /* Avoid include hell */ | 4 | /* Avoid include hell */ |
5 | #define NMI_VECTOR 0x02 | 5 | #define NMI_VECTOR 0x02 |
@@ -61,4 +61,4 @@ static inline void send_IPI_all(int vector) | |||
61 | } | 61 | } |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #endif /* __ASM_MACH_IPI_H */ | 64 | #endif /* ASM_X86__MACH_DEFAULT__MACH_IPI_H */ |
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index d14108505bb8..9c381f2815ac 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H |
3 | 3 | ||
4 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 4 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
5 | char *productid) | 5 | char *productid) |
@@ -14,4 +14,4 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
14 | } | 14 | } |
15 | 15 | ||
16 | 16 | ||
17 | #endif /* __ASM_MACH_MPPARSE_H */ | 17 | #endif /* ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H */ |
diff --git a/include/asm-x86/mach-default/mach_mpspec.h b/include/asm-x86/mach-default/mach_mpspec.h index 51c9a9775932..d77646f011f1 100644 --- a/include/asm-x86/mach-default/mach_mpspec.h +++ b/include/asm-x86/mach-default/mach_mpspec.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | 1 | #ifndef ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H |
2 | #define __ASM_MACH_MPSPEC_H | 2 | #define ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H |
3 | 3 | ||
4 | #define MAX_IRQ_SOURCES 256 | 4 | #define MAX_IRQ_SOURCES 256 |
5 | 5 | ||
@@ -9,4 +9,4 @@ | |||
9 | #define MAX_MP_BUSSES 32 | 9 | #define MAX_MP_BUSSES 32 |
10 | #endif | 10 | #endif |
11 | 11 | ||
12 | #endif /* __ASM_MACH_MPSPEC_H */ | 12 | #endif /* ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H */ |
diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h index 4b76e536cd98..990b15833834 100644 --- a/include/asm-x86/mach-default/mach_timer.h +++ b/include/asm-x86/mach-default/mach_timer.h | |||
@@ -10,8 +10,8 @@ | |||
10 | * directly because of the awkward 8-bit access mechanism of the 82C54 | 10 | * directly because of the awkward 8-bit access mechanism of the 82C54 |
11 | * device. | 11 | * device. |
12 | */ | 12 | */ |
13 | #ifndef _MACH_TIMER_H | 13 | #ifndef ASM_X86__MACH_DEFAULT__MACH_TIMER_H |
14 | #define _MACH_TIMER_H | 14 | #define ASM_X86__MACH_DEFAULT__MACH_TIMER_H |
15 | 15 | ||
16 | #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ | 16 | #define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ |
17 | #define CALIBRATE_LATCH \ | 17 | #define CALIBRATE_LATCH \ |
@@ -45,4 +45,4 @@ static inline void mach_countup(unsigned long *count_p) | |||
45 | *count_p = count; | 45 | *count_p = count; |
46 | } | 46 | } |
47 | 47 | ||
48 | #endif /* !_MACH_TIMER_H */ | 48 | #endif /* ASM_X86__MACH_DEFAULT__MACH_TIMER_H */ |
diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h index 2fe7705c0484..de9ac3f5c4ce 100644 --- a/include/asm-x86/mach-default/mach_traps.h +++ b/include/asm-x86/mach-default/mach_traps.h | |||
@@ -2,8 +2,8 @@ | |||
2 | * Machine specific NMI handling for generic. | 2 | * Machine specific NMI handling for generic. |
3 | * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> | 3 | * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> |
4 | */ | 4 | */ |
5 | #ifndef _MACH_TRAPS_H | 5 | #ifndef ASM_X86__MACH_DEFAULT__MACH_TRAPS_H |
6 | #define _MACH_TRAPS_H | 6 | #define ASM_X86__MACH_DEFAULT__MACH_TRAPS_H |
7 | 7 | ||
8 | #include <asm/mc146818rtc.h> | 8 | #include <asm/mc146818rtc.h> |
9 | 9 | ||
@@ -36,4 +36,4 @@ static inline void reassert_nmi(void) | |||
36 | unlock_cmos(); | 36 | unlock_cmos(); |
37 | } | 37 | } |
38 | 38 | ||
39 | #endif /* !_MACH_TRAPS_H */ | 39 | #endif /* ASM_X86__MACH_DEFAULT__MACH_TRAPS_H */ |
diff --git a/include/asm-x86/mach-default/mach_wakecpu.h b/include/asm-x86/mach-default/mach_wakecpu.h index 3ebb17893aa5..361b810f5160 100644 --- a/include/asm-x86/mach-default/mach_wakecpu.h +++ b/include/asm-x86/mach-default/mach_wakecpu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_WAKECPU_H | 1 | #ifndef ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H |
2 | #define __ASM_MACH_WAKECPU_H | 2 | #define ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file copes with machines that wakeup secondary CPUs by the | 5 | * This file copes with machines that wakeup secondary CPUs by the |
@@ -39,4 +39,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
39 | #define inquire_remote_apic(apicid) {} | 39 | #define inquire_remote_apic(apicid) {} |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #endif /* __ASM_MACH_WAKECPU_H */ | 42 | #endif /* ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H */ |
diff --git a/include/asm-x86/mach-es7000/mach_apicdef.h b/include/asm-x86/mach-es7000/mach_apicdef.h deleted file mode 100644 index a58ab5a75c8c..000000000000 --- a/include/asm-x86/mach-es7000/mach_apicdef.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h index 5305dcb96df2..6ce0f7786ef8 100644 --- a/include/asm-x86/mach-generic/gpio.h +++ b/include/asm-x86/mach-generic/gpio.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_GENERIC_GPIO_H | 1 | #ifndef ASM_X86__MACH_GENERIC__GPIO_H |
2 | #define __ASM_MACH_GENERIC_GPIO_H | 2 | #define ASM_X86__MACH_GENERIC__GPIO_H |
3 | 3 | ||
4 | int gpio_request(unsigned gpio, const char *label); | 4 | int gpio_request(unsigned gpio, const char *label); |
5 | void gpio_free(unsigned gpio); | 5 | void gpio_free(unsigned gpio); |
@@ -12,4 +12,4 @@ int irq_to_gpio(unsigned irq); | |||
12 | 12 | ||
13 | #include <asm-generic/gpio.h> /* cansleep wrappers */ | 13 | #include <asm-generic/gpio.h> /* cansleep wrappers */ |
14 | 14 | ||
15 | #endif /* __ASM_MACH_GENERIC_GPIO_H */ | 15 | #endif /* ASM_X86__MACH_GENERIC__GPIO_H */ |
diff --git a/include/asm-x86/mach-generic/irq_vectors_limits.h b/include/asm-x86/mach-generic/irq_vectors_limits.h index 890ce3f5e09a..f7870e1a220d 100644 --- a/include/asm-x86/mach-generic/irq_vectors_limits.h +++ b/include/asm-x86/mach-generic/irq_vectors_limits.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_LIMITS_H | 1 | #ifndef ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H |
2 | #define _ASM_IRQ_VECTORS_LIMITS_H | 2 | #define ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, | 5 | * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, |
@@ -11,4 +11,4 @@ | |||
11 | #define NR_IRQS 224 | 11 | #define NR_IRQS 224 |
12 | #define NR_IRQ_VECTORS 1024 | 12 | #define NR_IRQ_VECTORS 1024 |
13 | 13 | ||
14 | #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ | 14 | #endif /* ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H */ |
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h index 6eff343e1233..5d010c6881dd 100644 --- a/include/asm-x86/mach-generic/mach_apic.h +++ b/include/asm-x86/mach-generic/mach_apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef ASM_X86__MACH_GENERIC__MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define ASM_X86__MACH_GENERIC__MACH_APIC_H |
3 | 3 | ||
4 | #include <asm/genapic.h> | 4 | #include <asm/genapic.h> |
5 | 5 | ||
@@ -29,4 +29,4 @@ | |||
29 | 29 | ||
30 | extern void generic_bigsmp_probe(void); | 30 | extern void generic_bigsmp_probe(void); |
31 | 31 | ||
32 | #endif /* __ASM_MACH_APIC_H */ | 32 | #endif /* ASM_X86__MACH_GENERIC__MACH_APIC_H */ |
diff --git a/include/asm-x86/mach-generic/mach_apicdef.h b/include/asm-x86/mach-generic/mach_apicdef.h index 28ed98972ca8..1657f38b8f27 100644 --- a/include/asm-x86/mach-generic/mach_apicdef.h +++ b/include/asm-x86/mach-generic/mach_apicdef.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _GENAPIC_MACH_APICDEF_H | 1 | #ifndef ASM_X86__MACH_GENERIC__MACH_APICDEF_H |
2 | #define _GENAPIC_MACH_APICDEF_H 1 | 2 | #define ASM_X86__MACH_GENERIC__MACH_APICDEF_H |
3 | 3 | ||
4 | #ifndef APIC_DEFINITION | 4 | #ifndef APIC_DEFINITION |
5 | #include <asm/genapic.h> | 5 | #include <asm/genapic.h> |
@@ -8,4 +8,4 @@ | |||
8 | #define APIC_ID_MASK (genapic->apic_id_mask) | 8 | #define APIC_ID_MASK (genapic->apic_id_mask) |
9 | #endif | 9 | #endif |
10 | 10 | ||
11 | #endif | 11 | #endif /* ASM_X86__MACH_GENERIC__MACH_APICDEF_H */ |
diff --git a/include/asm-x86/mach-generic/mach_ipi.h b/include/asm-x86/mach-generic/mach_ipi.h index 441b0fe3ed1d..f67433dbd65f 100644 --- a/include/asm-x86/mach-generic/mach_ipi.h +++ b/include/asm-x86/mach-generic/mach_ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _MACH_IPI_H | 1 | #ifndef ASM_X86__MACH_GENERIC__MACH_IPI_H |
2 | #define _MACH_IPI_H 1 | 2 | #define ASM_X86__MACH_GENERIC__MACH_IPI_H |
3 | 3 | ||
4 | #include <asm/genapic.h> | 4 | #include <asm/genapic.h> |
5 | 5 | ||
@@ -7,4 +7,4 @@ | |||
7 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) | 7 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) |
8 | #define send_IPI_all (genapic->send_IPI_all) | 8 | #define send_IPI_all (genapic->send_IPI_all) |
9 | 9 | ||
10 | #endif | 10 | #endif /* ASM_X86__MACH_GENERIC__MACH_IPI_H */ |
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index 586cadbf3787..3115564e557c 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _MACH_MPPARSE_H | 1 | #ifndef ASM_X86__MACH_GENERIC__MACH_MPPARSE_H |
2 | #define _MACH_MPPARSE_H 1 | 2 | #define ASM_X86__MACH_GENERIC__MACH_MPPARSE_H |
3 | 3 | ||
4 | 4 | ||
5 | extern int mps_oem_check(struct mp_config_table *mpc, char *oem, | 5 | extern int mps_oem_check(struct mp_config_table *mpc, char *oem, |
@@ -7,4 +7,4 @@ extern int mps_oem_check(struct mp_config_table *mpc, char *oem, | |||
7 | 7 | ||
8 | extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); | 8 | extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); |
9 | 9 | ||
10 | #endif | 10 | #endif /* ASM_X86__MACH_GENERIC__MACH_MPPARSE_H */ |
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h index c83c120be538..6061b153613e 100644 --- a/include/asm-x86/mach-generic/mach_mpspec.h +++ b/include/asm-x86/mach-generic/mach_mpspec.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | 1 | #ifndef ASM_X86__MACH_GENERIC__MACH_MPSPEC_H |
2 | #define __ASM_MACH_MPSPEC_H | 2 | #define ASM_X86__MACH_GENERIC__MACH_MPSPEC_H |
3 | 3 | ||
4 | #define MAX_IRQ_SOURCES 256 | 4 | #define MAX_IRQ_SOURCES 256 |
5 | 5 | ||
@@ -9,4 +9,4 @@ | |||
9 | 9 | ||
10 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | 10 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, |
11 | char *productid); | 11 | char *productid); |
12 | #endif /* __ASM_MACH_MPSPEC_H */ | 12 | #endif /* ASM_X86__MACH_GENERIC__MACH_MPSPEC_H */ |
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h index acce0b7d397b..6184561980f2 100644 --- a/include/asm-x86/mach-rdc321x/gpio.h +++ b/include/asm-x86/mach-rdc321x/gpio.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _RDC321X_GPIO_H | 1 | #ifndef ASM_X86__MACH_RDC321X__GPIO_H |
2 | #define _RDC321X_GPIO_H | 2 | #define ASM_X86__MACH_RDC321X__GPIO_H |
3 | 3 | ||
4 | extern int rdc_gpio_get_value(unsigned gpio); | 4 | extern int rdc_gpio_get_value(unsigned gpio); |
5 | extern void rdc_gpio_set_value(unsigned gpio, int value); | 5 | extern void rdc_gpio_set_value(unsigned gpio, int value); |
@@ -54,4 +54,4 @@ static inline int irq_to_gpio(unsigned irq) | |||
54 | /* For cansleep */ | 54 | /* For cansleep */ |
55 | #include <asm-generic/gpio.h> | 55 | #include <asm-generic/gpio.h> |
56 | 56 | ||
57 | #endif /* _RDC321X_GPIO_H_ */ | 57 | #endif /* ASM_X86__MACH_RDC321X__GPIO_H */ |
diff --git a/include/asm-x86/mach-summit/mach_apicdef.h b/include/asm-x86/mach-summit/mach_apicdef.h deleted file mode 100644 index a58ab5a75c8c..000000000000 --- a/include/asm-x86/mach-summit/mach_apicdef.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h index 9bf4ae93ab10..5768d8e95c8c 100644 --- a/include/asm-x86/math_emu.h +++ b/include/asm-x86/math_emu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_MATH_EMU_H | 1 | #ifndef ASM_X86__MATH_EMU_H |
2 | #define _I386_MATH_EMU_H | 2 | #define ASM_X86__MATH_EMU_H |
3 | 3 | ||
4 | /* This structure matches the layout of the data saved to the stack | 4 | /* This structure matches the layout of the data saved to the stack |
5 | following a device-not-present interrupt, part of it saved | 5 | following a device-not-present interrupt, part of it saved |
@@ -28,4 +28,4 @@ struct info { | |||
28 | long ___vm86_fs; | 28 | long ___vm86_fs; |
29 | long ___vm86_gs; | 29 | long ___vm86_gs; |
30 | }; | 30 | }; |
31 | #endif | 31 | #endif /* ASM_X86__MATH_EMU_H */ |
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index daf1ccde77af..a995f33176cd 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Machine dependent access functions for RTC registers. | 2 | * Machine dependent access functions for RTC registers. |
3 | */ | 3 | */ |
4 | #ifndef _ASM_MC146818RTC_H | 4 | #ifndef ASM_X86__MC146818RTC_H |
5 | #define _ASM_MC146818RTC_H | 5 | #define ASM_X86__MC146818RTC_H |
6 | 6 | ||
7 | #include <asm/io.h> | 7 | #include <asm/io.h> |
8 | #include <asm/system.h> | 8 | #include <asm/system.h> |
@@ -101,4 +101,4 @@ extern unsigned long mach_get_cmos_time(void); | |||
101 | 101 | ||
102 | #define RTC_IRQ 8 | 102 | #define RTC_IRQ 8 |
103 | 103 | ||
104 | #endif /* _ASM_MC146818RTC_H */ | 104 | #endif /* ASM_X86__MC146818RTC_H */ |
diff --git a/include/asm-x86/mca.h b/include/asm-x86/mca.h index 09adf2eac4dc..60d1ed287b13 100644 --- a/include/asm-x86/mca.h +++ b/include/asm-x86/mca.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | 1 | /* -*- mode: c; c-basic-offset: 8 -*- */ |
2 | 2 | ||
3 | /* Platform specific MCA defines */ | 3 | /* Platform specific MCA defines */ |
4 | #ifndef _ASM_MCA_H | 4 | #ifndef ASM_X86__MCA_H |
5 | #define _ASM_MCA_H | 5 | #define ASM_X86__MCA_H |
6 | 6 | ||
7 | /* Maximal number of MCA slots - actually, some machines have less, but | 7 | /* Maximal number of MCA slots - actually, some machines have less, but |
8 | * they all have sufficient number of POS registers to cover 8. | 8 | * they all have sufficient number of POS registers to cover 8. |
@@ -40,4 +40,4 @@ | |||
40 | */ | 40 | */ |
41 | #define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) | 41 | #define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) |
42 | 42 | ||
43 | #endif | 43 | #endif /* ASM_X86__MCA_H */ |
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h index c3dca6edc6b1..49f22be237d2 100644 --- a/include/asm-x86/mca_dma.h +++ b/include/asm-x86/mca_dma.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef MCA_DMA_H | 1 | #ifndef ASM_X86__MCA_DMA_H |
2 | #define MCA_DMA_H | 2 | #define ASM_X86__MCA_DMA_H |
3 | 3 | ||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | #include <linux/ioport.h> | 5 | #include <linux/ioport.h> |
@@ -198,4 +198,4 @@ static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) | |||
198 | outb(mode, MCA_DMA_REG_EXE); | 198 | outb(mode, MCA_DMA_REG_EXE); |
199 | } | 199 | } |
200 | 200 | ||
201 | #endif /* MCA_DMA_H */ | 201 | #endif /* ASM_X86__MCA_DMA_H */ |
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h index 531eaa587455..036133eaf744 100644 --- a/include/asm-x86/mce.h +++ b/include/asm-x86/mce.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_MCE_H | 1 | #ifndef ASM_X86__MCE_H |
2 | #define _ASM_X86_MCE_H | 2 | #define ASM_X86__MCE_H |
3 | 3 | ||
4 | #ifdef __x86_64__ | 4 | #ifdef __x86_64__ |
5 | 5 | ||
@@ -127,4 +127,4 @@ extern void restart_mce(void); | |||
127 | 127 | ||
128 | #endif /* __KERNEL__ */ | 128 | #endif /* __KERNEL__ */ |
129 | 129 | ||
130 | #endif | 130 | #endif /* ASM_X86__MCE_H */ |
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h index 90bc4108a4fd..4ef28e6de383 100644 --- a/include/asm-x86/mman.h +++ b/include/asm-x86/mman.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_MMAN_H | 1 | #ifndef ASM_X86__MMAN_H |
2 | #define _ASM_X86_MMAN_H | 2 | #define ASM_X86__MMAN_H |
3 | 3 | ||
4 | #include <asm-generic/mman.h> | 4 | #include <asm-generic/mman.h> |
5 | 5 | ||
@@ -17,4 +17,4 @@ | |||
17 | #define MCL_CURRENT 1 /* lock all current mappings */ | 17 | #define MCL_CURRENT 1 /* lock all current mappings */ |
18 | #define MCL_FUTURE 2 /* lock all future mappings */ | 18 | #define MCL_FUTURE 2 /* lock all future mappings */ |
19 | 19 | ||
20 | #endif /* _ASM_X86_MMAN_H */ | 20 | #endif /* ASM_X86__MMAN_H */ |
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h index e293ab81e850..fb79b1cf5d07 100644 --- a/include/asm-x86/mmconfig.h +++ b/include/asm-x86/mmconfig.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_MMCONFIG_H | 1 | #ifndef ASM_X86__MMCONFIG_H |
2 | #define _ASM_MMCONFIG_H | 2 | #define ASM_X86__MMCONFIG_H |
3 | 3 | ||
4 | #ifdef CONFIG_PCI_MMCONFIG | 4 | #ifdef CONFIG_PCI_MMCONFIG |
5 | extern void __cpuinit fam10h_check_enable_mmcfg(void); | 5 | extern void __cpuinit fam10h_check_enable_mmcfg(void); |
@@ -9,4 +9,4 @@ static inline void fam10h_check_enable_mmcfg(void) { } | |||
9 | static inline void check_enable_amd_mmconf_dmi(void) { } | 9 | static inline void check_enable_amd_mmconf_dmi(void) { } |
10 | #endif | 10 | #endif |
11 | 11 | ||
12 | #endif | 12 | #endif /* ASM_X86__MMCONFIG_H */ |
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index 00e88679e11f..a30d7a9c8297 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_MMU_H | 1 | #ifndef ASM_X86__MMU_H |
2 | #define _ASM_X86_MMU_H | 2 | #define ASM_X86__MMU_H |
3 | 3 | ||
4 | #include <linux/spinlock.h> | 4 | #include <linux/spinlock.h> |
5 | #include <linux/mutex.h> | 5 | #include <linux/mutex.h> |
@@ -28,4 +28,4 @@ static inline void leave_mm(int cpu) | |||
28 | } | 28 | } |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #endif /* _ASM_X86_MMU_H */ | 31 | #endif /* ASM_X86__MMU_H */ |
diff --git a/include/asm-x86/mmu_context.h b/include/asm-x86/mmu_context.h index fac57014e7c6..8ec940bfd079 100644 --- a/include/asm-x86/mmu_context.h +++ b/include/asm-x86/mmu_context.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_X86_MMU_CONTEXT_H | 1 | #ifndef ASM_X86__MMU_CONTEXT_H |
2 | #define __ASM_X86_MMU_CONTEXT_H | 2 | #define ASM_X86__MMU_CONTEXT_H |
3 | 3 | ||
4 | #include <asm/desc.h> | 4 | #include <asm/desc.h> |
5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
@@ -34,4 +34,4 @@ do { \ | |||
34 | } while (0); | 34 | } while (0); |
35 | 35 | ||
36 | 36 | ||
37 | #endif /* __ASM_X86_MMU_CONTEXT_H */ | 37 | #endif /* ASM_X86__MMU_CONTEXT_H */ |
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 824fc575c6d8..cce6f6e4afd6 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __I386_SCHED_H | 1 | #ifndef ASM_X86__MMU_CONTEXT_32_H |
2 | #define __I386_SCHED_H | 2 | #define ASM_X86__MMU_CONTEXT_32_H |
3 | 3 | ||
4 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 4 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
5 | { | 5 | { |
@@ -53,4 +53,4 @@ static inline void switch_mm(struct mm_struct *prev, | |||
53 | #define deactivate_mm(tsk, mm) \ | 53 | #define deactivate_mm(tsk, mm) \ |
54 | asm("movl %0,%%gs": :"r" (0)); | 54 | asm("movl %0,%%gs": :"r" (0)); |
55 | 55 | ||
56 | #endif | 56 | #endif /* ASM_X86__MMU_CONTEXT_32_H */ |
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index c7000634ccae..26758673c828 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __X86_64_MMU_CONTEXT_H | 1 | #ifndef ASM_X86__MMU_CONTEXT_64_H |
2 | #define __X86_64_MMU_CONTEXT_H | 2 | #define ASM_X86__MMU_CONTEXT_64_H |
3 | 3 | ||
4 | #include <asm/pda.h> | 4 | #include <asm/pda.h> |
5 | 5 | ||
@@ -51,4 +51,4 @@ do { \ | |||
51 | asm volatile("movl %0,%%fs"::"r"(0)); \ | 51 | asm volatile("movl %0,%%fs"::"r"(0)); \ |
52 | } while (0) | 52 | } while (0) |
53 | 53 | ||
54 | #endif | 54 | #endif /* ASM_X86__MMU_CONTEXT_64_H */ |
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h index 940881218ff8..2e7299bb3653 100644 --- a/include/asm-x86/mmx.h +++ b/include/asm-x86/mmx.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_MMX_H | 1 | #ifndef ASM_X86__MMX_H |
2 | #define _ASM_MMX_H | 2 | #define ASM_X86__MMX_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * MMX 3Dnow! helper operations | 5 | * MMX 3Dnow! helper operations |
@@ -11,4 +11,4 @@ extern void *_mmx_memcpy(void *to, const void *from, size_t size); | |||
11 | extern void mmx_clear_page(void *page); | 11 | extern void mmx_clear_page(void *page); |
12 | extern void mmx_copy_page(void *to, void *from); | 12 | extern void mmx_copy_page(void *to, void *from); |
13 | 13 | ||
14 | #endif | 14 | #endif /* ASM_X86__MMX_H */ |
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 5862e6460658..121b65d61d86 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h | |||
@@ -3,8 +3,8 @@ | |||
3 | * | 3 | * |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef _ASM_MMZONE_H_ | 6 | #ifndef ASM_X86__MMZONE_32_H |
7 | #define _ASM_MMZONE_H_ | 7 | #define ASM_X86__MMZONE_32_H |
8 | 8 | ||
9 | #include <asm/smp.h> | 9 | #include <asm/smp.h> |
10 | 10 | ||
@@ -131,4 +131,4 @@ static inline int pfn_valid(int pfn) | |||
131 | }) | 131 | }) |
132 | #endif /* CONFIG_NEED_MULTIPLE_NODES */ | 132 | #endif /* CONFIG_NEED_MULTIPLE_NODES */ |
133 | 133 | ||
134 | #endif /* _ASM_MMZONE_H_ */ | 134 | #endif /* ASM_X86__MMZONE_32_H */ |
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index 594bd0dc1d08..626b03a14875 100644 --- a/include/asm-x86/mmzone_64.h +++ b/include/asm-x86/mmzone_64.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* K8 NUMA support */ | 1 | /* K8 NUMA support */ |
2 | /* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ | 2 | /* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ |
3 | /* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */ | 3 | /* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */ |
4 | #ifndef _ASM_X86_64_MMZONE_H | 4 | #ifndef ASM_X86__MMZONE_64_H |
5 | #define _ASM_X86_64_MMZONE_H 1 | 5 | #define ASM_X86__MMZONE_64_H |
6 | 6 | ||
7 | 7 | ||
8 | #ifdef CONFIG_NUMA | 8 | #ifdef CONFIG_NUMA |
@@ -49,4 +49,4 @@ extern int early_pfn_to_nid(unsigned long pfn); | |||
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #endif | 51 | #endif |
52 | #endif | 52 | #endif /* ASM_X86__MMZONE_64_H */ |
diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h index bfedb247871c..48dc3e0c07d9 100644 --- a/include/asm-x86/module.h +++ b/include/asm-x86/module.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_MODULE_H | 1 | #ifndef ASM_X86__MODULE_H |
2 | #define _ASM_MODULE_H | 2 | #define ASM_X86__MODULE_H |
3 | 3 | ||
4 | /* x86_32/64 are simple */ | 4 | /* x86_32/64 are simple */ |
5 | struct mod_arch_specific {}; | 5 | struct mod_arch_specific {}; |
@@ -79,4 +79,4 @@ struct mod_arch_specific {}; | |||
79 | # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE | 79 | # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | #endif /* _ASM_MODULE_H */ | 82 | #endif /* ASM_X86__MODULE_H */ |
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index b6995e567fcc..118da365e371 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _AM_X86_MPSPEC_H | 1 | #ifndef ASM_X86__MPSPEC_H |
2 | #define _AM_X86_MPSPEC_H | 2 | #define ASM_X86__MPSPEC_H |
3 | 3 | ||
4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | 5 | ||
@@ -141,4 +141,4 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map) | |||
141 | 141 | ||
142 | extern physid_mask_t phys_cpu_present_map; | 142 | extern physid_mask_t phys_cpu_present_map; |
143 | 143 | ||
144 | #endif | 144 | #endif /* ASM_X86__MPSPEC_H */ |
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 38d1e73b49e4..79166b048012 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MPSPEC_DEF_H | 1 | #ifndef ASM_X86__MPSPEC_DEF_H |
2 | #define __ASM_MPSPEC_DEF_H | 2 | #define ASM_X86__MPSPEC_DEF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Structure definitions for SMP machines following the | 5 | * Structure definitions for SMP machines following the |
@@ -177,4 +177,4 @@ enum mp_bustype { | |||
177 | MP_BUS_PCI, | 177 | MP_BUS_PCI, |
178 | MP_BUS_MCA, | 178 | MP_BUS_MCA, |
179 | }; | 179 | }; |
180 | #endif | 180 | #endif /* ASM_X86__MPSPEC_DEF_H */ |
diff --git a/include/asm-x86/msgbuf.h b/include/asm-x86/msgbuf.h index 7e4e9481f51c..1b538c907a3d 100644 --- a/include/asm-x86/msgbuf.h +++ b/include/asm-x86/msgbuf.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_MSGBUF_H | 1 | #ifndef ASM_X86__MSGBUF_H |
2 | #define _ASM_X86_MSGBUF_H | 2 | #define ASM_X86__MSGBUF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The msqid64_ds structure for i386 architecture. | 5 | * The msqid64_ds structure for i386 architecture. |
@@ -36,4 +36,4 @@ struct msqid64_ds { | |||
36 | unsigned long __unused5; | 36 | unsigned long __unused5; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #endif /* _ASM_X86_MSGBUF_H */ | 39 | #endif /* ASM_X86__MSGBUF_H */ |
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h index 296f29ce426d..ed9190246876 100644 --- a/include/asm-x86/msidef.h +++ b/include/asm-x86/msidef.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_MSIDEF_H | 1 | #ifndef ASM_X86__MSIDEF_H |
2 | #define ASM_MSIDEF_H | 2 | #define ASM_X86__MSIDEF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Constants for Intel APIC based MSI messages. | 5 | * Constants for Intel APIC based MSI messages. |
@@ -48,4 +48,8 @@ | |||
48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ | 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ |
49 | MSI_ADDR_DEST_ID_MASK) | 49 | MSI_ADDR_DEST_ID_MASK) |
50 | 50 | ||
51 | #endif /* ASM_MSIDEF_H */ | 51 | #define MSI_ADDR_IR_EXT_INT (1 << 4) |
52 | #define MSI_ADDR_IR_SHV (1 << 3) | ||
53 | #define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13) | ||
54 | #define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5) | ||
55 | #endif /* ASM_X86__MSIDEF_H */ | ||
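The new MSI_ADDR_IR_* definitions above describe the remappable MSI address format used with interrupt remapping: bit 4 flags the message as remappable, bit 3 is the subhandle-valid flag, and the 16-bit remapping-table index is split so that bits 14:0 land in address bits 19:5 (INDEX2) while bit 15 lands in address bit 2 (INDEX1). A hedged sketch of composing such an address (MSI_ADDR_BASE_LO is assumed to be the usual 0xfee00000 base defined in this header):

	static unsigned int example_ir_msi_address_lo(unsigned int index)
	{
		return MSI_ADDR_BASE_LO            /* fixed 0xfeexxxxx MSI window    */
		     | MSI_ADDR_IR_EXT_INT         /* remappable-format message      */
		     | MSI_ADDR_IR_SHV             /* subhandle carried in the data  */
		     | MSI_ADDR_IR_INDEX1(index)   /* index bit 15  -> addr bit 2    */
		     | MSI_ADDR_IR_INDEX2(index);  /* index bits 14:0 -> addr 19:5   */
	}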
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index 44bce773012e..3052f058ab06 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MSR_INDEX_H | 1 | #ifndef ASM_X86__MSR_INDEX_H |
2 | #define __ASM_MSR_INDEX_H | 2 | #define ASM_X86__MSR_INDEX_H |
3 | 3 | ||
4 | /* CPU model specific register (MSR) numbers */ | 4 | /* CPU model specific register (MSR) numbers */ |
5 | 5 | ||
@@ -310,4 +310,4 @@ | |||
310 | /* Geode defined MSRs */ | 310 | /* Geode defined MSRs */ |
311 | #define MSR_GEODE_BUSCONT_CONF0 0x00001900 | 311 | #define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
312 | 312 | ||
313 | #endif /* __ASM_MSR_INDEX_H */ | 313 | #endif /* ASM_X86__MSR_INDEX_H */ |
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 2362cfda1fbc..530af1f6389e 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_X86_MSR_H_ | 1 | #ifndef ASM_X86__MSR_H |
2 | #define __ASM_X86_MSR_H_ | 2 | #define ASM_X86__MSR_H |
3 | 3 | ||
4 | #include <asm/msr-index.h> | 4 | #include <asm/msr-index.h> |
5 | 5 | ||
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, | |||
63 | return EAX_EDX_VAL(val, low, high); | 63 | return EAX_EDX_VAL(val, low, high); |
64 | } | 64 | } |
65 | 65 | ||
66 | static inline unsigned long long native_read_msr_amd_safe(unsigned int msr, | ||
67 | int *err) | ||
68 | { | ||
69 | DECLARE_ARGS(val, low, high); | ||
70 | |||
71 | asm volatile("2: rdmsr ; xor %0,%0\n" | ||
72 | "1:\n\t" | ||
73 | ".section .fixup,\"ax\"\n\t" | ||
74 | "3: mov %3,%0 ; jmp 1b\n\t" | ||
75 | ".previous\n\t" | ||
76 | _ASM_EXTABLE(2b, 3b) | ||
77 | : "=r" (*err), EAX_EDX_RET(val, low, high) | ||
78 | : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT)); | ||
79 | return EAX_EDX_VAL(val, low, high); | ||
80 | } | ||
81 | |||
66 | static inline void native_write_msr(unsigned int msr, | 82 | static inline void native_write_msr(unsigned int msr, |
67 | unsigned low, unsigned high) | 83 | unsigned low, unsigned high) |
68 | { | 84 | { |
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | |||
158 | *p = native_read_msr_safe(msr, &err); | 174 | *p = native_read_msr_safe(msr, &err); |
159 | return err; | 175 | return err; |
160 | } | 176 | } |
177 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) | ||
178 | { | ||
179 | int err; | ||
180 | |||
181 | *p = native_read_msr_amd_safe(msr, &err); | ||
182 | return err; | ||
183 | } | ||
161 | 184 | ||
162 | #define rdtscl(low) \ | 185 | #define rdtscl(low) \ |
163 | ((low) = (u32)native_read_tsc()) | 186 | ((low) = (u32)native_read_tsc()) |
@@ -221,4 +244,4 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
221 | #endif /* __KERNEL__ */ | 244 | #endif /* __KERNEL__ */ |
222 | 245 | ||
223 | 246 | ||
224 | #endif | 247 | #endif /* ASM_X86__MSR_H */ |
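The msr.h hunk above adds native_read_msr_amd_safe()/rdmsrl_amd_safe(): the same exception-table-protected rdmsr as native_read_msr_safe(), but with the constant 0x9c5a203a loaded into %edi (the "D" constraint), which AMD CPUs expect as a passcode before certain model-specific registers may be read; if the rdmsr still faults, the fixup path stores -EFAULT in *err. A hedged usage sketch (the MSR number is an arbitrary example, not taken from this patch):

	static void example_read_amd_msr(void)
	{
		unsigned long long val;

		/* 0xc0011023 is only an illustrative MSR number. */
		if (rdmsrl_amd_safe(0xc0011023, &val))
			printk(KERN_WARNING "example: rdmsr faulted\n");
		else
			printk(KERN_INFO "example: msr = %llx\n", val);
	}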
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index a69a01a51729..23a7f83da953 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h | |||
@@ -20,8 +20,8 @@ | |||
20 | The postal address is: | 20 | The postal address is: |
21 | Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. | 21 | Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. |
22 | */ | 22 | */ |
23 | #ifndef _ASM_X86_MTRR_H | 23 | #ifndef ASM_X86__MTRR_H |
24 | #define _ASM_X86_MTRR_H | 24 | #define ASM_X86__MTRR_H |
25 | 25 | ||
26 | #include <linux/ioctl.h> | 26 | #include <linux/ioctl.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
@@ -170,4 +170,4 @@ struct mtrr_gentry32 { | |||
170 | 170 | ||
171 | #endif /* __KERNEL__ */ | 171 | #endif /* __KERNEL__ */ |
172 | 172 | ||
173 | #endif /* _ASM_X86_MTRR_H */ | 173 | #endif /* ASM_X86__MTRR_H */ |
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index 73e928ef5f03..25c16d8ba3c7 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h | |||
@@ -6,8 +6,8 @@ | |||
6 | * | 6 | * |
7 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 7 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
8 | */ | 8 | */ |
9 | #ifndef _ASM_MUTEX_H | 9 | #ifndef ASM_X86__MUTEX_32_H |
10 | #define _ASM_MUTEX_H | 10 | #define ASM_X86__MUTEX_32_H |
11 | 11 | ||
12 | #include <asm/alternative.h> | 12 | #include <asm/alternative.h> |
13 | 13 | ||
@@ -122,4 +122,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count, | |||
122 | #endif | 122 | #endif |
123 | } | 123 | } |
124 | 124 | ||
125 | #endif | 125 | #endif /* ASM_X86__MUTEX_32_H */ |
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h index f3fae9becb38..918ba21ab9d9 100644 --- a/include/asm-x86/mutex_64.h +++ b/include/asm-x86/mutex_64.h | |||
@@ -6,8 +6,8 @@ | |||
6 | * | 6 | * |
7 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 7 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
8 | */ | 8 | */ |
9 | #ifndef _ASM_MUTEX_H | 9 | #ifndef ASM_X86__MUTEX_64_H |
10 | #define _ASM_MUTEX_H | 10 | #define ASM_X86__MUTEX_64_H |
11 | 11 | ||
12 | /** | 12 | /** |
13 | * __mutex_fastpath_lock - decrement and call function if negative | 13 | * __mutex_fastpath_lock - decrement and call function if negative |
@@ -97,4 +97,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count, | |||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | #endif | 100 | #endif /* ASM_X86__MUTEX_64_H */ |
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 21f8d0202a82..f8b76f383904 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_NMI_H_ | 1 | #ifndef ASM_X86__NMI_H |
2 | #define _ASM_X86_NMI_H_ | 2 | #define ASM_X86__NMI_H |
3 | 3 | ||
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <asm/irq.h> | 5 | #include <asm/irq.h> |
@@ -81,4 +81,4 @@ void enable_lapic_nmi_watchdog(void); | |||
81 | void stop_nmi(void); | 81 | void stop_nmi(void); |
82 | void restart_nmi(void); | 82 | void restart_nmi(void); |
83 | 83 | ||
84 | #endif | 84 | #endif /* ASM_X86__NMI_H */ |
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h index ad0bedd10b89..ae742721ae73 100644 --- a/include/asm-x86/nops.h +++ b/include/asm-x86/nops.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_NOPS_H | 1 | #ifndef ASM_X86__NOPS_H |
2 | #define _ASM_NOPS_H 1 | 2 | #define ASM_X86__NOPS_H |
3 | 3 | ||
4 | /* Define nops for use with alternative() */ | 4 | /* Define nops for use with alternative() */ |
5 | 5 | ||
@@ -115,4 +115,4 @@ | |||
115 | 115 | ||
116 | #define ASM_NOP_MAX 8 | 116 | #define ASM_NOP_MAX 8 |
117 | 117 | ||
118 | #endif | 118 | #endif /* ASM_X86__NOPS_H */ |
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h index 220d7b7707a0..44cb07855c5b 100644 --- a/include/asm-x86/numa_32.h +++ b/include/asm-x86/numa_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_32_NUMA_H | 1 | #ifndef ASM_X86__NUMA_32_H |
2 | #define _ASM_X86_32_NUMA_H 1 | 2 | #define ASM_X86__NUMA_32_H |
3 | 3 | ||
4 | extern int pxm_to_nid(int pxm); | 4 | extern int pxm_to_nid(int pxm); |
5 | extern void numa_remove_cpu(int cpu); | 5 | extern void numa_remove_cpu(int cpu); |
@@ -8,4 +8,4 @@ extern void numa_remove_cpu(int cpu); | |||
8 | extern void set_highmem_pages_init(void); | 8 | extern void set_highmem_pages_init(void); |
9 | #endif | 9 | #endif |
10 | 10 | ||
11 | #endif /* _ASM_X86_32_NUMA_H */ | 11 | #endif /* ASM_X86__NUMA_32_H */ |
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 3830094434a9..15c990395b02 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X8664_NUMA_H | 1 | #ifndef ASM_X86__NUMA_64_H |
2 | #define _ASM_X8664_NUMA_H 1 | 2 | #define ASM_X86__NUMA_64_H |
3 | 3 | ||
4 | #include <linux/nodemask.h> | 4 | #include <linux/nodemask.h> |
5 | #include <asm/apicdef.h> | 5 | #include <asm/apicdef.h> |
@@ -40,4 +40,4 @@ static inline void numa_add_cpu(int cpu, int node) { } | |||
40 | static inline void numa_remove_cpu(int cpu) { } | 40 | static inline void numa_remove_cpu(int cpu) { } |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #endif | 43 | #endif /* ASM_X86__NUMA_64_H */ |
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h index 34b92d581fa3..124bf7d4b70a 100644 --- a/include/asm-x86/numaq.h +++ b/include/asm-x86/numaq.h | |||
@@ -23,8 +23,8 @@ | |||
23 | * Send feedback to <gone@us.ibm.com> | 23 | * Send feedback to <gone@us.ibm.com> |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #ifndef NUMAQ_H | 26 | #ifndef ASM_X86__NUMAQ_H |
27 | #define NUMAQ_H | 27 | #define ASM_X86__NUMAQ_H |
28 | 28 | ||
29 | #ifdef CONFIG_X86_NUMAQ | 29 | #ifdef CONFIG_X86_NUMAQ |
30 | 30 | ||
@@ -165,5 +165,5 @@ static inline int get_memcfg_numaq(void) | |||
165 | return 0; | 165 | return 0; |
166 | } | 166 | } |
167 | #endif /* CONFIG_X86_NUMAQ */ | 167 | #endif /* CONFIG_X86_NUMAQ */ |
168 | #endif /* NUMAQ_H */ | 168 | #endif /* ASM_X86__NUMAQ_H */ |
169 | 169 | ||
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/numaq/apic.h index d802465e026a..a8344ba6ea15 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/numaq/apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_NUMAQ_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_NUMAQ_APIC_H |
3 | 3 | ||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | #include <linux/mmzone.h> | 5 | #include <linux/mmzone.h> |
@@ -135,4 +135,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
135 | return cpuid_apic >> index_msb; | 135 | return cpuid_apic >> index_msb; |
136 | } | 136 | } |
137 | 137 | ||
138 | #endif /* __ASM_MACH_APIC_H */ | 138 | #endif /* __ASM_NUMAQ_APIC_H */ |
diff --git a/include/asm-x86/mach-numaq/mach_apicdef.h b/include/asm-x86/numaq/apicdef.h index bf439d0690f5..e012a46cc22a 100644 --- a/include/asm-x86/mach-numaq/mach_apicdef.h +++ b/include/asm-x86/numaq/apicdef.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | 1 | #ifndef __ASM_NUMAQ_APICDEF_H |
2 | #define __ASM_MACH_APICDEF_H | 2 | #define __ASM_NUMAQ_APICDEF_H |
3 | 3 | ||
4 | 4 | ||
5 | #define APIC_ID_MASK (0xF<<24) | 5 | #define APIC_ID_MASK (0xF<<24) |
diff --git a/include/asm-x86/mach-numaq/mach_ipi.h b/include/asm-x86/numaq/ipi.h index c6044488e9e6..935588d286cf 100644 --- a/include/asm-x86/mach-numaq/mach_ipi.h +++ b/include/asm-x86/numaq/ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | 1 | #ifndef __ASM_NUMAQ_IPI_H |
2 | #define __ASM_MACH_IPI_H | 2 | #define __ASM_NUMAQ_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t, int vector); |
5 | 5 | ||
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector) | |||
22 | send_IPI_mask(cpu_online_map, vector); | 22 | send_IPI_mask(cpu_online_map, vector); |
23 | } | 23 | } |
24 | 24 | ||
25 | #endif /* __ASM_MACH_IPI_H */ | 25 | #endif /* __ASM_NUMAQ_IPI_H */ |
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/numaq/mpparse.h index 626aef6b155f..252292e077b6 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/numaq/mpparse.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_NUMAQ_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_NUMAQ_MPPARSE_H |
3 | 3 | ||
4 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | 4 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, |
5 | char *productid); | 5 | char *productid); |
6 | 6 | ||
7 | #endif /* __ASM_MACH_MPPARSE_H */ | 7 | #endif /* __ASM_NUMAQ_MPPARSE_H */ |
diff --git a/include/asm-x86/mach-numaq/mach_wakecpu.h b/include/asm-x86/numaq/wakecpu.h index 00530041a991..c577bda5b1c5 100644 --- a/include/asm-x86/mach-numaq/mach_wakecpu.h +++ b/include/asm-x86/numaq/wakecpu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_WAKECPU_H | 1 | #ifndef __ASM_NUMAQ_WAKECPU_H |
2 | #define __ASM_MACH_WAKECPU_H | 2 | #define __ASM_NUMAQ_WAKECPU_H |
3 | 3 | ||
4 | /* This file copes with machines that wakeup secondary CPUs by NMIs */ | 4 | /* This file copes with machines that wakeup secondary CPUs by NMIs */ |
5 | 5 | ||
@@ -40,4 +40,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
40 | 40 | ||
41 | #define inquire_remote_apic(apicid) {} | 41 | #define inquire_remote_apic(apicid) {} |
42 | 42 | ||
43 | #endif /* __ASM_MACH_WAKECPU_H */ | 43 | #endif /* __ASM_NUMAQ_WAKECPU_H */ |
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h index 97d47133486f..d7328b1a05c1 100644 --- a/include/asm-x86/olpc.h +++ b/include/asm-x86/olpc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* OLPC machine specific definitions */ | 1 | /* OLPC machine specific definitions */ |
2 | 2 | ||
3 | #ifndef ASM_OLPC_H_ | 3 | #ifndef ASM_X86__OLPC_H |
4 | #define ASM_OLPC_H_ | 4 | #define ASM_X86__OLPC_H |
5 | 5 | ||
6 | #include <asm/geode.h> | 6 | #include <asm/geode.h> |
7 | 7 | ||
@@ -129,4 +129,4 @@ extern int olpc_ec_mask_unset(uint8_t bits); | |||
129 | #define OLPC_GPIO_LID geode_gpio(26) | 129 | #define OLPC_GPIO_LID geode_gpio(26) |
130 | #define OLPC_GPIO_ECSCI geode_gpio(27) | 130 | #define OLPC_GPIO_ECSCI geode_gpio(27) |
131 | 131 | ||
132 | #endif | 132 | #endif /* ASM_X86__OLPC_H */ |
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index 49982110e4d9..79544e6ffb8b 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PAGE_H | 1 | #ifndef ASM_X86__PAGE_H |
2 | #define _ASM_X86_PAGE_H | 2 | #define ASM_X86__PAGE_H |
3 | 3 | ||
4 | #include <linux/const.h> | 4 | #include <linux/const.h> |
5 | 5 | ||
@@ -199,4 +199,4 @@ static inline pteval_t native_pte_flags(pte_t pte) | |||
199 | #define __HAVE_ARCH_GATE_AREA 1 | 199 | #define __HAVE_ARCH_GATE_AREA 1 |
200 | 200 | ||
201 | #endif /* __KERNEL__ */ | 201 | #endif /* __KERNEL__ */ |
202 | #endif /* _ASM_X86_PAGE_H */ | 202 | #endif /* ASM_X86__PAGE_H */ |
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index ab8528793f08..f32062a821c5 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PAGE_32_H | 1 | #ifndef ASM_X86__PAGE_32_H |
2 | #define _ASM_X86_PAGE_32_H | 2 | #define ASM_X86__PAGE_32_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This handles the memory map. | 5 | * This handles the memory map. |
@@ -96,6 +96,7 @@ extern void find_low_pfn_range(void); | |||
96 | extern unsigned long init_memory_mapping(unsigned long start, | 96 | extern unsigned long init_memory_mapping(unsigned long start, |
97 | unsigned long end); | 97 | unsigned long end); |
98 | extern void initmem_init(unsigned long, unsigned long); | 98 | extern void initmem_init(unsigned long, unsigned long); |
99 | extern void free_initmem(void); | ||
99 | extern void setup_bootmem_allocator(void); | 100 | extern void setup_bootmem_allocator(void); |
100 | 101 | ||
101 | 102 | ||
@@ -126,4 +127,4 @@ static inline void copy_page(void *to, void *from) | |||
126 | #endif /* CONFIG_X86_3DNOW */ | 127 | #endif /* CONFIG_X86_3DNOW */ |
127 | #endif /* !__ASSEMBLY__ */ | 128 | #endif /* !__ASSEMBLY__ */ |
128 | 129 | ||
129 | #endif /* _ASM_X86_PAGE_32_H */ | 130 | #endif /* ASM_X86__PAGE_32_H */ |
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index c6916c83e6b1..5e64acfed0a4 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_64_PAGE_H | 1 | #ifndef ASM_X86__PAGE_64_H |
2 | #define _X86_64_PAGE_H | 2 | #define ASM_X86__PAGE_64_H |
3 | 3 | ||
4 | #define PAGETABLE_LEVELS 4 | 4 | #define PAGETABLE_LEVELS 4 |
5 | 5 | ||
@@ -91,6 +91,7 @@ extern unsigned long init_memory_mapping(unsigned long start, | |||
91 | unsigned long end); | 91 | unsigned long end); |
92 | 92 | ||
93 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); | 93 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); |
94 | extern void free_initmem(void); | ||
94 | 95 | ||
95 | extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); | 96 | extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); |
96 | extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); | 97 | extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); |
@@ -102,4 +103,4 @@ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); | |||
102 | #endif | 103 | #endif |
103 | 104 | ||
104 | 105 | ||
105 | #endif /* _X86_64_PAGE_H */ | 106 | #endif /* ASM_X86__PAGE_64_H */ |
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h index 6f0d0422f4ca..0009cfb11a5f 100644 --- a/include/asm-x86/param.h +++ b/include/asm-x86/param.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PARAM_H | 1 | #ifndef ASM_X86__PARAM_H |
2 | #define _ASM_X86_PARAM_H | 2 | #define ASM_X86__PARAM_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | 5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ |
@@ -19,4 +19,4 @@ | |||
19 | 19 | ||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | 20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ |
21 | 21 | ||
22 | #endif /* _ASM_X86_PARAM_H */ | 22 | #endif /* ASM_X86__PARAM_H */ |
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index fbbde93f12d6..cba612c89e06 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_PARAVIRT_H | 1 | #ifndef ASM_X86__PARAVIRT_H |
2 | #define __ASM_PARAVIRT_H | 2 | #define ASM_X86__PARAVIRT_H |
3 | /* Various instructions on x86 need to be replaced for | 3 | /* Various instructions on x86 need to be replaced for |
4 | * para-virtualization: those hooks are defined here. */ | 4 | * para-virtualization: those hooks are defined here. */ |
5 | 5 | ||
@@ -137,6 +137,7 @@ struct pv_cpu_ops { | |||
137 | 137 | ||
138 | /* MSR, PMC and TSR operations. | 138 | /* MSR, PMC and TSR operations. |
139 | err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ | 139 | err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ |
140 | u64 (*read_msr_amd)(unsigned int msr, int *err); | ||
140 | u64 (*read_msr)(unsigned int msr, int *err); | 141 | u64 (*read_msr)(unsigned int msr, int *err); |
141 | int (*write_msr)(unsigned int msr, unsigned low, unsigned high); | 142 | int (*write_msr)(unsigned int msr, unsigned low, unsigned high); |
142 | 143 | ||
@@ -200,12 +201,6 @@ struct pv_irq_ops { | |||
200 | 201 | ||
201 | struct pv_apic_ops { | 202 | struct pv_apic_ops { |
202 | #ifdef CONFIG_X86_LOCAL_APIC | 203 | #ifdef CONFIG_X86_LOCAL_APIC |
203 | /* | ||
204 | * Direct APIC operations, principally for VMI. Ideally | ||
205 | * these shouldn't be in this interface. | ||
206 | */ | ||
207 | void (*apic_write)(unsigned long reg, u32 v); | ||
208 | u32 (*apic_read)(unsigned long reg); | ||
209 | void (*setup_boot_clock)(void); | 204 | void (*setup_boot_clock)(void); |
210 | void (*setup_secondary_clock)(void); | 205 | void (*setup_secondary_clock)(void); |
211 | 206 | ||
@@ -726,6 +721,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err) | |||
726 | { | 721 | { |
727 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); | 722 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); |
728 | } | 723 | } |
724 | static inline u64 paravirt_read_msr_amd(unsigned msr, int *err) | ||
725 | { | ||
726 | return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err); | ||
727 | } | ||
729 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | 728 | static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) |
730 | { | 729 | { |
731 | return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); | 730 | return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); |
@@ -771,6 +770,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | |||
771 | *p = paravirt_read_msr(msr, &err); | 770 | *p = paravirt_read_msr(msr, &err); |
772 | return err; | 771 | return err; |
773 | } | 772 | } |
773 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) | ||
774 | { | ||
775 | int err; | ||
776 | |||
777 | *p = paravirt_read_msr_amd(msr, &err); | ||
778 | return err; | ||
779 | } | ||
774 | 780 | ||
775 | static inline u64 paravirt_read_tsc(void) | 781 | static inline u64 paravirt_read_tsc(void) |
776 | { | 782 | { |
@@ -898,19 +904,6 @@ static inline void slow_down_io(void) | |||
898 | } | 904 | } |
899 | 905 | ||
900 | #ifdef CONFIG_X86_LOCAL_APIC | 906 | #ifdef CONFIG_X86_LOCAL_APIC |
901 | /* | ||
902 | * Basic functions accessing APICs. | ||
903 | */ | ||
904 | static inline void apic_write(unsigned long reg, u32 v) | ||
905 | { | ||
906 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); | ||
907 | } | ||
908 | |||
909 | static inline u32 apic_read(unsigned long reg) | ||
910 | { | ||
911 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); | ||
912 | } | ||
913 | |||
914 | static inline void setup_boot_clock(void) | 907 | static inline void setup_boot_clock(void) |
915 | { | 908 | { |
916 | PVOP_VCALL0(pv_apic_ops.setup_boot_clock); | 909 | PVOP_VCALL0(pv_apic_ops.setup_boot_clock); |
@@ -1634,4 +1627,4 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1634 | 1627 | ||
1635 | #endif /* __ASSEMBLY__ */ | 1628 | #endif /* __ASSEMBLY__ */ |
1636 | #endif /* CONFIG_PARAVIRT */ | 1629 | #endif /* CONFIG_PARAVIRT */ |
1637 | #endif /* __ASM_PARAVIRT_H */ | 1630 | #endif /* ASM_X86__PARAVIRT_H */ |
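The paravirt hunks above add a separate AMD MSR read hook (read_msr_amd in pv_cpu_ops, exposed through paravirt_read_msr_amd() and rdmsrl_amd_safe()) and drop the apic_read/apic_write hooks from pv_apic_ops. A minimal caller sketch for the new helper follows; the MSR number is a placeholder, not something this patch defines:

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/msr.h>

/*
 * Illustration only: 0xc0010000 stands in for some AMD-only MSR.
 * rdmsrl_amd_safe() returns 0 on success, or the error from the
 * faulting rdmsr if the register cannot be read.
 */
static void __init report_amd_msr_example(void)
{
	unsigned long long val;

	if (rdmsrl_amd_safe(0xc0010000, &val))
		printk(KERN_INFO "AMD MSR 0xc0010000 not readable\n");
	else
		printk(KERN_INFO "AMD MSR 0xc0010000 = %016llx\n", val);
}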
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h index 3c4ffeb467e9..2e3dda4dc3d9 100644 --- a/include/asm-x86/parport.h +++ b/include/asm-x86/parport.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PARPORT_H | 1 | #ifndef ASM_X86__PARPORT_H |
2 | #define _ASM_X86_PARPORT_H | 2 | #define ASM_X86__PARPORT_H |
3 | 3 | ||
4 | static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); | 4 | static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); |
5 | static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) | 5 | static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) |
@@ -7,4 +7,4 @@ static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) | |||
7 | return parport_pc_find_isa_ports(autoirq, autodma); | 7 | return parport_pc_find_isa_ports(autoirq, autodma); |
8 | } | 8 | } |
9 | 9 | ||
10 | #endif /* _ASM_X86_PARPORT_H */ | 10 | #endif /* ASM_X86__PARPORT_H */ |
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h index 7edc47307217..482c3e3f9879 100644 --- a/include/asm-x86/pat.h +++ b/include/asm-x86/pat.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_PAT_H | 1 | #ifndef ASM_X86__PAT_H |
2 | #define _ASM_PAT_H | 2 | #define ASM_X86__PAT_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
@@ -19,4 +19,4 @@ extern int free_memtype(u64 start, u64 end); | |||
19 | 19 | ||
20 | extern void pat_disable(char *reason); | 20 | extern void pat_disable(char *reason); |
21 | 21 | ||
22 | #endif | 22 | #endif /* ASM_X86__PAT_H */ |
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h index 80c775d9fe20..da42be07b690 100644 --- a/include/asm-x86/pci-direct.h +++ b/include/asm-x86/pci-direct.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_PCI_DIRECT_H | 1 | #ifndef ASM_X86__PCI_DIRECT_H |
2 | #define ASM_PCI_DIRECT_H 1 | 2 | #define ASM_X86__PCI_DIRECT_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
@@ -18,4 +18,4 @@ extern int early_pci_allowed(void); | |||
18 | extern unsigned int pci_early_dump_regs; | 18 | extern unsigned int pci_early_dump_regs; |
19 | extern void early_dump_pci_device(u8 bus, u8 slot, u8 func); | 19 | extern void early_dump_pci_device(u8 bus, u8 slot, u8 func); |
20 | extern void early_dump_pci_devices(void); | 20 | extern void early_dump_pci_devices(void); |
21 | #endif | 21 | #endif /* ASM_X86__PCI_DIRECT_H */ |
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index 2db14cf17db8..602583192991 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __x86_PCI_H | 1 | #ifndef ASM_X86__PCI_H |
2 | #define __x86_PCI_H | 2 | #define ASM_X86__PCI_H |
3 | 3 | ||
4 | #include <linux/mm.h> /* for struct page */ | 4 | #include <linux/mm.h> /* for struct page */ |
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
@@ -111,4 +111,4 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) | |||
111 | } | 111 | } |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | #endif | 114 | #endif /* ASM_X86__PCI_H */ |
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h index a50d46851285..3f2288207c0c 100644 --- a/include/asm-x86/pci_32.h +++ b/include/asm-x86/pci_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __i386_PCI_H | 1 | #ifndef ASM_X86__PCI_32_H |
2 | #define __i386_PCI_H | 2 | #define ASM_X86__PCI_32_H |
3 | 3 | ||
4 | 4 | ||
5 | #ifdef __KERNEL__ | 5 | #ifdef __KERNEL__ |
@@ -31,4 +31,4 @@ struct pci_dev; | |||
31 | #endif /* __KERNEL__ */ | 31 | #endif /* __KERNEL__ */ |
32 | 32 | ||
33 | 33 | ||
34 | #endif /* __i386_PCI_H */ | 34 | #endif /* ASM_X86__PCI_32_H */ |
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h index f330234ffa5c..f72e12d5770e 100644 --- a/include/asm-x86/pci_64.h +++ b/include/asm-x86/pci_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __x8664_PCI_H | 1 | #ifndef ASM_X86__PCI_64_H |
2 | #define __x8664_PCI_H | 2 | #define ASM_X86__PCI_64_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
@@ -63,4 +63,4 @@ extern void pci_iommu_alloc(void); | |||
63 | 63 | ||
64 | #endif /* __KERNEL__ */ | 64 | #endif /* __KERNEL__ */ |
65 | 65 | ||
66 | #endif /* __x8664_PCI_H */ | 66 | #endif /* ASM_X86__PCI_64_H */ |
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index b34e9a7cc80b..80860afffbdb 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef X86_64_PDA_H | 1 | #ifndef ASM_X86__PDA_H |
2 | #define X86_64_PDA_H | 2 | #define ASM_X86__PDA_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
@@ -134,4 +134,4 @@ do { \ | |||
134 | 134 | ||
135 | #define PDA_STACKOFFSET (5*8) | 135 | #define PDA_STACKOFFSET (5*8) |
136 | 136 | ||
137 | #endif | 137 | #endif /* ASM_X86__PDA_H */ |
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index f643a3a92da0..e10a1d0678cf 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PERCPU_H_ | 1 | #ifndef ASM_X86__PERCPU_H |
2 | #define _ASM_X86_PERCPU_H_ | 2 | #define ASM_X86__PERCPU_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_64 | 4 | #ifdef CONFIG_X86_64 |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
@@ -215,4 +215,4 @@ do { \ | |||
215 | 215 | ||
216 | #endif /* !CONFIG_SMP */ | 216 | #endif /* !CONFIG_SMP */ |
217 | 217 | ||
218 | #endif /* _ASM_X86_PERCPU_H_ */ | 218 | #endif /* ASM_X86__PERCPU_H */ |
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h index d63ea431cb3b..3cd23adedae8 100644 --- a/include/asm-x86/pgalloc.h +++ b/include/asm-x86/pgalloc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PGALLOC_H | 1 | #ifndef ASM_X86__PGALLOC_H |
2 | #define _ASM_X86_PGALLOC_H | 2 | #define ASM_X86__PGALLOC_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/mm.h> /* for struct page */ | 5 | #include <linux/mm.h> /* for struct page */ |
@@ -111,4 +111,4 @@ extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); | |||
111 | #endif /* PAGETABLE_LEVELS > 3 */ | 111 | #endif /* PAGETABLE_LEVELS > 3 */ |
112 | #endif /* PAGETABLE_LEVELS > 2 */ | 112 | #endif /* PAGETABLE_LEVELS > 2 */ |
113 | 113 | ||
114 | #endif /* _ASM_X86_PGALLOC_H */ | 114 | #endif /* ASM_X86__PGALLOC_H */ |
diff --git a/include/asm-x86/pgtable-2level-defs.h b/include/asm-x86/pgtable-2level-defs.h index 0f71c9f13da4..7ec48f4e5347 100644 --- a/include/asm-x86/pgtable-2level-defs.h +++ b/include/asm-x86/pgtable-2level-defs.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_PGTABLE_2LEVEL_DEFS_H | 1 | #ifndef ASM_X86__PGTABLE_2LEVEL_DEFS_H |
2 | #define _I386_PGTABLE_2LEVEL_DEFS_H | 2 | #define ASM_X86__PGTABLE_2LEVEL_DEFS_H |
3 | 3 | ||
4 | #define SHARED_KERNEL_PMD 0 | 4 | #define SHARED_KERNEL_PMD 0 |
5 | 5 | ||
@@ -17,4 +17,4 @@ | |||
17 | 17 | ||
18 | #define PTRS_PER_PTE 1024 | 18 | #define PTRS_PER_PTE 1024 |
19 | 19 | ||
20 | #endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ | 20 | #endif /* ASM_X86__PGTABLE_2LEVEL_DEFS_H */ |
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h index 46bc52c0eae1..60440b191626 100644 --- a/include/asm-x86/pgtable-2level.h +++ b/include/asm-x86/pgtable-2level.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_PGTABLE_2LEVEL_H | 1 | #ifndef ASM_X86__PGTABLE_2LEVEL_H |
2 | #define _I386_PGTABLE_2LEVEL_H | 2 | #define ASM_X86__PGTABLE_2LEVEL_H |
3 | 3 | ||
4 | #define pte_ERROR(e) \ | 4 | #define pte_ERROR(e) \ |
5 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) | 5 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) |
@@ -78,4 +78,4 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) | |||
78 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) | 78 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
79 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) | 79 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
80 | 80 | ||
81 | #endif /* _I386_PGTABLE_2LEVEL_H */ | 81 | #endif /* ASM_X86__PGTABLE_2LEVEL_H */ |
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h index 448ac9516314..c05fe6ff3720 100644 --- a/include/asm-x86/pgtable-3level-defs.h +++ b/include/asm-x86/pgtable-3level-defs.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_PGTABLE_3LEVEL_DEFS_H | 1 | #ifndef ASM_X86__PGTABLE_3LEVEL_DEFS_H |
2 | #define _I386_PGTABLE_3LEVEL_DEFS_H | 2 | #define ASM_X86__PGTABLE_3LEVEL_DEFS_H |
3 | 3 | ||
4 | #ifdef CONFIG_PARAVIRT | 4 | #ifdef CONFIG_PARAVIRT |
5 | #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) | 5 | #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd) |
@@ -25,4 +25,4 @@ | |||
25 | */ | 25 | */ |
26 | #define PTRS_PER_PTE 512 | 26 | #define PTRS_PER_PTE 512 |
27 | 27 | ||
28 | #endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ | 28 | #endif /* ASM_X86__PGTABLE_3LEVEL_DEFS_H */ |
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index 105057f34032..e713bd5f39a6 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_PGTABLE_3LEVEL_H | 1 | #ifndef ASM_X86__PGTABLE_3LEVEL_H |
2 | #define _I386_PGTABLE_3LEVEL_H | 2 | #define ASM_X86__PGTABLE_3LEVEL_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Intel Physical Address Extension (PAE) Mode - three-level page | 5 | * Intel Physical Address Extension (PAE) Mode - three-level page |
@@ -179,4 +179,4 @@ static inline unsigned long pte_pfn(pte_t pte) | |||
179 | #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) | 179 | #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) |
180 | #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) | 180 | #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) |
181 | 181 | ||
182 | #endif /* _I386_PGTABLE_3LEVEL_H */ | 182 | #endif /* ASM_X86__PGTABLE_3LEVEL_H */ |
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 04caa2f544df..57d919a2d79d 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PGTABLE_H | 1 | #ifndef ASM_X86__PGTABLE_H |
2 | #define _ASM_X86_PGTABLE_H | 2 | #define ASM_X86__PGTABLE_H |
3 | 3 | ||
4 | #define FIRST_USER_ADDRESS 0 | 4 | #define FIRST_USER_ADDRESS 0 |
5 | 5 | ||
@@ -313,6 +313,8 @@ static inline void native_pagetable_setup_start(pgd_t *base) {} | |||
313 | static inline void native_pagetable_setup_done(pgd_t *base) {} | 313 | static inline void native_pagetable_setup_done(pgd_t *base) {} |
314 | #endif | 314 | #endif |
315 | 315 | ||
316 | extern int arch_report_meminfo(char *page); | ||
317 | |||
316 | #ifdef CONFIG_PARAVIRT | 318 | #ifdef CONFIG_PARAVIRT |
317 | #include <asm/paravirt.h> | 319 | #include <asm/paravirt.h> |
318 | #else /* !CONFIG_PARAVIRT */ | 320 | #else /* !CONFIG_PARAVIRT */ |
@@ -521,4 +523,4 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
521 | #include <asm-generic/pgtable.h> | 523 | #include <asm-generic/pgtable.h> |
522 | #endif /* __ASSEMBLY__ */ | 524 | #endif /* __ASSEMBLY__ */ |
523 | 525 | ||
524 | #endif /* _ASM_X86_PGTABLE_H */ | 526 | #endif /* ASM_X86__PGTABLE_H */ |
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 5c3b26567a95..45c8235400fe 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_PGTABLE_H | 1 | #ifndef ASM_X86__PGTABLE_32_H |
2 | #define _I386_PGTABLE_H | 2 | #define ASM_X86__PGTABLE_32_H |
3 | 3 | ||
4 | 4 | ||
5 | /* | 5 | /* |
@@ -31,6 +31,7 @@ static inline void pgtable_cache_init(void) { } | |||
31 | static inline void check_pgt_cache(void) { } | 31 | static inline void check_pgt_cache(void) { } |
32 | void paging_init(void); | 32 | void paging_init(void); |
33 | 33 | ||
34 | extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); | ||
34 | 35 | ||
35 | /* | 36 | /* |
36 | * The Linux x86 paging architecture is 'compile-time dual-mode', it | 37 | * The Linux x86 paging architecture is 'compile-time dual-mode', it |
@@ -186,4 +187,4 @@ do { \ | |||
186 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 187 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
187 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 188 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
188 | 189 | ||
189 | #endif /* _I386_PGTABLE_H */ | 190 | #endif /* ASM_X86__PGTABLE_32_H */ |
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 549144d03d99..e3dcf7a08a0b 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_64_PGTABLE_H | 1 | #ifndef ASM_X86__PGTABLE_64_H |
2 | #define _X86_64_PGTABLE_H | 2 | #define ASM_X86__PGTABLE_64_H |
3 | 3 | ||
4 | #include <linux/const.h> | 4 | #include <linux/const.h> |
5 | #ifndef __ASSEMBLY__ | 5 | #ifndef __ASSEMBLY__ |
@@ -284,4 +284,4 @@ extern void cleanup_highmap(void); | |||
284 | #define __HAVE_ARCH_PTE_SAME | 284 | #define __HAVE_ARCH_PTE_SAME |
285 | #endif /* !__ASSEMBLY__ */ | 285 | #endif /* !__ASSEMBLY__ */ |
286 | 286 | ||
287 | #endif /* _X86_64_PGTABLE_H */ | 287 | #endif /* ASM_X86__PGTABLE_64_H */ |
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h index b031efda37ec..70cf2bb05939 100644 --- a/include/asm-x86/posix_types_32.h +++ b/include/asm-x86/posix_types_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ARCH_I386_POSIX_TYPES_H | 1 | #ifndef ASM_X86__POSIX_TYPES_32_H |
2 | #define __ARCH_I386_POSIX_TYPES_H | 2 | #define ASM_X86__POSIX_TYPES_32_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file is generally used by user-level software, so you need to | 5 | * This file is generally used by user-level software, so you need to |
@@ -82,4 +82,4 @@ do { \ | |||
82 | 82 | ||
83 | #endif /* defined(__KERNEL__) */ | 83 | #endif /* defined(__KERNEL__) */ |
84 | 84 | ||
85 | #endif | 85 | #endif /* ASM_X86__POSIX_TYPES_32_H */ |
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h index d6624c95854a..388b4e7f4a44 100644 --- a/include/asm-x86/posix_types_64.h +++ b/include/asm-x86/posix_types_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_POSIX_TYPES_H | 1 | #ifndef ASM_X86__POSIX_TYPES_64_H |
2 | #define _ASM_X86_64_POSIX_TYPES_H | 2 | #define ASM_X86__POSIX_TYPES_64_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file is generally used by user-level software, so you need to | 5 | * This file is generally used by user-level software, so you need to |
@@ -116,4 +116,4 @@ static inline void __FD_ZERO(__kernel_fd_set *p) | |||
116 | 116 | ||
117 | #endif /* defined(__KERNEL__) */ | 117 | #endif /* defined(__KERNEL__) */ |
118 | 118 | ||
119 | #endif | 119 | #endif /* ASM_X86__POSIX_TYPES_64_H */ |
diff --git a/include/asm-x86/prctl.h b/include/asm-x86/prctl.h index 52952adef1ca..e7ae34eb4103 100644 --- a/include/asm-x86/prctl.h +++ b/include/asm-x86/prctl.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef X86_64_PRCTL_H | 1 | #ifndef ASM_X86__PRCTL_H |
2 | #define X86_64_PRCTL_H 1 | 2 | #define ASM_X86__PRCTL_H |
3 | 3 | ||
4 | #define ARCH_SET_GS 0x1001 | 4 | #define ARCH_SET_GS 0x1001 |
5 | #define ARCH_SET_FS 0x1002 | 5 | #define ARCH_SET_FS 0x1002 |
@@ -7,4 +7,4 @@ | |||
7 | #define ARCH_GET_GS 0x1004 | 7 | #define ARCH_GET_GS 0x1004 |
8 | 8 | ||
9 | 9 | ||
10 | #endif | 10 | #endif /* ASM_X86__PRCTL_H */ |
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h index 97568ada1f97..1198f2a0e42c 100644 --- a/include/asm-x86/processor-cyrix.h +++ b/include/asm-x86/processor-cyrix.h | |||
@@ -28,3 +28,11 @@ static inline void setCx86(u8 reg, u8 data) | |||
28 | outb(reg, 0x22); | 28 | outb(reg, 0x22); |
29 | outb(data, 0x23); | 29 | outb(data, 0x23); |
30 | } | 30 | } |
31 | |||
32 | #define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); }) | ||
33 | |||
34 | #define setCx86_old(reg, data) do { \ | ||
35 | outb((reg), 0x22); \ | ||
36 | outb((data), 0x23); \ | ||
37 | } while (0) | ||
38 | |||
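getCx86_old()/setCx86_old() preserve the bare two-step index/data access to the Cyrix configuration registers on ports 0x22/0x23. A hedged usage sketch, with 0xc3 (CCR3) and the MAPEN bit taken from existing Cyrix code but the sequence itself purely illustrative:

#include <linux/types.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>

/* Open the extended Cyrix register window via CCR3.MAPEN, then restore. */
static void cyrix_poke_example(void)
{
	u8 ccr3 = getCx86_old(0xc3);			/* read CCR3 */

	setCx86_old(0xc3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	/* ... access CCR4..CCR7 or the ARR registers here ... */
	setCx86_old(0xc3, ccr3);			/* restore CCR3 */
}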
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h index eff2ecd7fff0..dc5f0712f9fa 100644 --- a/include/asm-x86/processor-flags.h +++ b/include/asm-x86/processor-flags.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_I386_PROCESSOR_FLAGS_H | 1 | #ifndef ASM_X86__PROCESSOR_FLAGS_H |
2 | #define __ASM_I386_PROCESSOR_FLAGS_H | 2 | #define ASM_X86__PROCESSOR_FLAGS_H |
3 | /* Various flags defined: can be included from assembler. */ | 3 | /* Various flags defined: can be included from assembler. */ |
4 | 4 | ||
5 | /* | 5 | /* |
@@ -59,6 +59,7 @@ | |||
59 | #define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ | 59 | #define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ |
60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ | 60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ |
61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ | 61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ |
62 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | ||
62 | 63 | ||
63 | /* | 64 | /* |
64 | * x86-64 Task Priority Register, CR8 | 65 | * x86-64 Task Priority Register, CR8 |
@@ -96,4 +97,4 @@ | |||
96 | #endif | 97 | #endif |
97 | #endif | 98 | #endif |
98 | 99 | ||
99 | #endif /* __ASM_I386_PROCESSOR_FLAGS_H */ | 100 | #endif /* ASM_X86__PROCESSOR_FLAGS_H */ |
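X86_CR4_OSXSAVE (bit 18 of CR4) is the bit the OS sets to enable the XSAVE/XRSTOR and XGETBV/XSETBV instructions. A minimal sketch of turning it on, assuming the existing read_cr4()/write_cr4() accessors; this is not code from the patch:

#include <asm/processor.h>
#include <asm/processor-flags.h>

/* Enable CR4.OSXSAVE so XSAVE-managed extended state becomes usable. */
static void enable_osxsave_example(void)
{
	unsigned long cr4 = read_cr4();

	if (!(cr4 & X86_CR4_OSXSAVE))
		write_cr4(cr4 | X86_CR4_OSXSAVE);
}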
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 4df3e2f6fb56..bbbbe1fc5ce1 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_X86_PROCESSOR_H | 1 | #ifndef ASM_X86__PROCESSOR_H |
2 | #define __ASM_X86_PROCESSOR_H | 2 | #define ASM_X86__PROCESSOR_H |
3 | 3 | ||
4 | #include <asm/processor-flags.h> | 4 | #include <asm/processor-flags.h> |
5 | 5 | ||
@@ -77,9 +77,9 @@ struct cpuinfo_x86 { | |||
77 | __u8 x86_phys_bits; | 77 | __u8 x86_phys_bits; |
78 | /* CPUID returned core id bits: */ | 78 | /* CPUID returned core id bits: */ |
79 | __u8 x86_coreid_bits; | 79 | __u8 x86_coreid_bits; |
80 | #endif | ||
80 | /* Max extended CPUID function supported: */ | 81 | /* Max extended CPUID function supported: */ |
81 | __u32 extended_cpuid_level; | 82 | __u32 extended_cpuid_level; |
82 | #endif | ||
83 | /* Maximum supported CPUID level, -1=no CPUID: */ | 83 | /* Maximum supported CPUID level, -1=no CPUID: */ |
84 | int cpuid_level; | 84 | int cpuid_level; |
85 | __u32 x86_capability[NCAPINTS]; | 85 | __u32 x86_capability[NCAPINTS]; |
@@ -140,6 +140,8 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | |||
140 | #define current_cpu_data boot_cpu_data | 140 | #define current_cpu_data boot_cpu_data |
141 | #endif | 141 | #endif |
142 | 142 | ||
143 | extern const struct seq_operations cpuinfo_op; | ||
144 | |||
143 | static inline int hlt_works(int cpu) | 145 | static inline int hlt_works(int cpu) |
144 | { | 146 | { |
145 | #ifdef CONFIG_X86_32 | 147 | #ifdef CONFIG_X86_32 |
@@ -153,6 +155,8 @@ static inline int hlt_works(int cpu) | |||
153 | 155 | ||
154 | extern void cpu_detect(struct cpuinfo_x86 *c); | 156 | extern void cpu_detect(struct cpuinfo_x86 *c); |
155 | 157 | ||
158 | extern struct pt_regs *idle_regs(struct pt_regs *); | ||
159 | |||
156 | extern void early_cpu_init(void); | 160 | extern void early_cpu_init(void); |
157 | extern void identify_boot_cpu(void); | 161 | extern void identify_boot_cpu(void); |
158 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | 162 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); |
@@ -161,6 +165,7 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | |||
161 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 165 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); |
162 | extern unsigned short num_cache_leaves; | 166 | extern unsigned short num_cache_leaves; |
163 | 167 | ||
168 | extern void detect_extended_topology(struct cpuinfo_x86 *c); | ||
164 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) | 169 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) |
165 | extern void detect_ht(struct cpuinfo_x86 *c); | 170 | extern void detect_ht(struct cpuinfo_x86 *c); |
166 | #else | 171 | #else |
@@ -322,7 +327,12 @@ struct i387_fxsave_struct { | |||
322 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ | 327 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ |
323 | u32 xmm_space[64]; | 328 | u32 xmm_space[64]; |
324 | 329 | ||
325 | u32 padding[24]; | 330 | u32 padding[12]; |
331 | |||
332 | union { | ||
333 | u32 padding1[12]; | ||
334 | u32 sw_reserved[12]; | ||
335 | }; | ||
326 | 336 | ||
327 | } __attribute__((aligned(16))); | 337 | } __attribute__((aligned(16))); |
328 | 338 | ||
@@ -346,10 +356,23 @@ struct i387_soft_struct { | |||
346 | u32 entry_eip; | 356 | u32 entry_eip; |
347 | }; | 357 | }; |
348 | 358 | ||
359 | struct xsave_hdr_struct { | ||
360 | u64 xstate_bv; | ||
361 | u64 reserved1[2]; | ||
362 | u64 reserved2[5]; | ||
363 | } __attribute__((packed)); | ||
364 | |||
365 | struct xsave_struct { | ||
366 | struct i387_fxsave_struct i387; | ||
367 | struct xsave_hdr_struct xsave_hdr; | ||
368 | /* new processor state extensions will go here */ | ||
369 | } __attribute__ ((packed, aligned (64))); | ||
370 | |||
349 | union thread_xstate { | 371 | union thread_xstate { |
350 | struct i387_fsave_struct fsave; | 372 | struct i387_fsave_struct fsave; |
351 | struct i387_fxsave_struct fxsave; | 373 | struct i387_fxsave_struct fxsave; |
352 | struct i387_soft_struct soft; | 374 | struct i387_soft_struct soft; |
375 | struct xsave_struct xsave; | ||
353 | }; | 376 | }; |
354 | 377 | ||
355 | #ifdef CONFIG_X86_64 | 378 | #ifdef CONFIG_X86_64 |
@@ -943,4 +966,4 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, | |||
943 | extern int get_tsc_mode(unsigned long adr); | 966 | extern int get_tsc_mode(unsigned long adr); |
944 | extern int set_tsc_mode(unsigned int val); | 967 | extern int set_tsc_mode(unsigned int val); |
945 | 968 | ||
946 | #endif | 969 | #endif /* ASM_X86__PROCESSOR_H */ |
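The processor.h hunks grow union thread_xstate with an xsave_struct: the legacy i387_fxsave_struct followed by an xsave_hdr_struct whose xstate_bv records which state components the hardware actually saved. A small illustrative helper, using the architectural bit assignments (bit 0 = x87, bit 1 = SSE) rather than anything defined in this patch:

#include <linux/types.h>
#include <asm/processor.h>

#define EXAMPLE_XSTATE_FP	0x1ULL	/* x87 state present in xstate_bv */
#define EXAMPLE_XSTATE_SSE	0x2ULL	/* SSE state present in xstate_bv */

/* Did the last XSAVE into this area write SSE state? */
static int xsave_area_has_sse(const struct xsave_struct *xs)
{
	return (xs->xsave_hdr.xstate_bv & EXAMPLE_XSTATE_SSE) != 0;
}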
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index 3dd458c385c0..6e89e8b4de0e 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X8664_PROTO_H | 1 | #ifndef ASM_X86__PROTO_H |
2 | #define _ASM_X8664_PROTO_H 1 | 2 | #define ASM_X86__PROTO_H |
3 | 3 | ||
4 | #include <asm/ldt.h> | 4 | #include <asm/ldt.h> |
5 | 5 | ||
@@ -29,4 +29,4 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | |||
29 | #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) | 29 | #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) |
30 | #define round_down(x, y) ((x) & ~((y) - 1)) | 30 | #define round_down(x, y) ((x) & ~((y) - 1)) |
31 | 31 | ||
32 | #endif | 32 | #endif /* ASM_X86__PROTO_H */ |
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h index 72e7b9db29bb..d0cf3344a586 100644 --- a/include/asm-x86/ptrace-abi.h +++ b/include/asm-x86/ptrace-abi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PTRACE_ABI_H | 1 | #ifndef ASM_X86__PTRACE_ABI_H |
2 | #define _ASM_X86_PTRACE_ABI_H | 2 | #define ASM_X86__PTRACE_ABI_H |
3 | 3 | ||
4 | #ifdef __i386__ | 4 | #ifdef __i386__ |
5 | 5 | ||
@@ -140,4 +140,4 @@ struct ptrace_bts_config { | |||
140 | Returns number of BTS records drained. | 140 | Returns number of BTS records drained. |
141 | */ | 141 | */ |
142 | 142 | ||
143 | #endif | 143 | #endif /* ASM_X86__PTRACE_ABI_H */ |
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 8a71db803da6..66ff7bd47379 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PTRACE_H | 1 | #ifndef ASM_X86__PTRACE_H |
2 | #define _ASM_X86_PTRACE_H | 2 | #define ASM_X86__PTRACE_H |
3 | 3 | ||
4 | #include <linux/compiler.h> /* For __user */ | 4 | #include <linux/compiler.h> /* For __user */ |
5 | #include <asm/ptrace-abi.h> | 5 | #include <asm/ptrace-abi.h> |
@@ -148,6 +148,9 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | |||
148 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); | 148 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); |
149 | #endif | 149 | #endif |
150 | 150 | ||
151 | extern long syscall_trace_enter(struct pt_regs *); | ||
152 | extern void syscall_trace_leave(struct pt_regs *); | ||
153 | |||
151 | static inline unsigned long regs_return_value(struct pt_regs *regs) | 154 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
152 | { | 155 | { |
153 | return regs->ax; | 156 | return regs->ax; |
@@ -239,4 +242,4 @@ extern int do_set_thread_area(struct task_struct *p, int idx, | |||
239 | 242 | ||
240 | #endif /* !__ASSEMBLY__ */ | 243 | #endif /* !__ASSEMBLY__ */ |
241 | 244 | ||
242 | #endif | 245 | #endif /* ASM_X86__PTRACE_H */ |
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h index 6857f840b243..edb3b4ecfc81 100644 --- a/include/asm-x86/pvclock-abi.h +++ b/include/asm-x86/pvclock-abi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PVCLOCK_ABI_H_ | 1 | #ifndef ASM_X86__PVCLOCK_ABI_H |
2 | #define _ASM_X86_PVCLOCK_ABI_H_ | 2 | #define ASM_X86__PVCLOCK_ABI_H |
3 | #ifndef __ASSEMBLY__ | 3 | #ifndef __ASSEMBLY__ |
4 | 4 | ||
5 | /* | 5 | /* |
@@ -39,4 +39,4 @@ struct pvclock_wall_clock { | |||
39 | } __attribute__((__packed__)); | 39 | } __attribute__((__packed__)); |
40 | 40 | ||
41 | #endif /* __ASSEMBLY__ */ | 41 | #endif /* __ASSEMBLY__ */ |
42 | #endif /* _ASM_X86_PVCLOCK_ABI_H_ */ | 42 | #endif /* ASM_X86__PVCLOCK_ABI_H */ |
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h index 85b1bba8e0a3..1a38f6834800 100644 --- a/include/asm-x86/pvclock.h +++ b/include/asm-x86/pvclock.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_PVCLOCK_H_ | 1 | #ifndef ASM_X86__PVCLOCK_H |
2 | #define _ASM_X86_PVCLOCK_H_ | 2 | #define ASM_X86__PVCLOCK_H |
3 | 3 | ||
4 | #include <linux/clocksource.h> | 4 | #include <linux/clocksource.h> |
5 | #include <asm/pvclock-abi.h> | 5 | #include <asm/pvclock-abi.h> |
@@ -10,4 +10,4 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall, | |||
10 | struct pvclock_vcpu_time_info *vcpu, | 10 | struct pvclock_vcpu_time_info *vcpu, |
11 | struct timespec *ts); | 11 | struct timespec *ts); |
12 | 12 | ||
13 | #endif /* _ASM_X86_PVCLOCK_H_ */ | 13 | #endif /* ASM_X86__PVCLOCK_H */ |
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index 206f355786dc..1c2f0ce9e31e 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_REBOOT_H | 1 | #ifndef ASM_X86__REBOOT_H |
2 | #define _ASM_REBOOT_H | 2 | #define ASM_X86__REBOOT_H |
3 | 3 | ||
4 | struct pt_regs; | 4 | struct pt_regs; |
5 | 5 | ||
@@ -18,4 +18,4 @@ void native_machine_crash_shutdown(struct pt_regs *regs); | |||
18 | void native_machine_shutdown(void); | 18 | void native_machine_shutdown(void); |
19 | void machine_real_restart(const unsigned char *code, int length); | 19 | void machine_real_restart(const unsigned char *code, int length); |
20 | 20 | ||
21 | #endif /* _ASM_REBOOT_H */ | 21 | #endif /* ASM_X86__REBOOT_H */ |
diff --git a/include/asm-x86/reboot_fixups.h b/include/asm-x86/reboot_fixups.h index 0cb7d87c2b68..2c2987d97570 100644 --- a/include/asm-x86/reboot_fixups.h +++ b/include/asm-x86/reboot_fixups.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #ifndef _LINUX_REBOOT_FIXUPS_H | 1 | #ifndef ASM_X86__REBOOT_FIXUPS_H |
2 | #define _LINUX_REBOOT_FIXUPS_H | 2 | #define ASM_X86__REBOOT_FIXUPS_H |
3 | 3 | ||
4 | extern void mach_reboot_fixups(void); | 4 | extern void mach_reboot_fixups(void); |
5 | 5 | ||
6 | #endif /* _LINUX_REBOOT_FIXUPS_H */ | 6 | #endif /* ASM_X86__REBOOT_FIXUPS_H */ |
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h index adec887dd7cd..a01c4e376331 100644 --- a/include/asm-x86/required-features.h +++ b/include/asm-x86/required-features.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_REQUIRED_FEATURES_H | 1 | #ifndef ASM_X86__REQUIRED_FEATURES_H |
2 | #define _ASM_REQUIRED_FEATURES_H 1 | 2 | #define ASM_X86__REQUIRED_FEATURES_H |
3 | 3 | ||
4 | /* Define minimum CPUID feature set for kernel These bits are checked | 4 | /* Define minimum CPUID feature set for kernel These bits are checked |
5 | really early to actually display a visible error message before the | 5 | really early to actually display a visible error message before the |
@@ -41,6 +41,12 @@ | |||
41 | # define NEED_3DNOW 0 | 41 | # define NEED_3DNOW 0 |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | #if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) | ||
45 | # define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) | ||
46 | #else | ||
47 | # define NEED_NOPL 0 | ||
48 | #endif | ||
49 | |||
44 | #ifdef CONFIG_X86_64 | 50 | #ifdef CONFIG_X86_64 |
45 | #define NEED_PSE 0 | 51 | #define NEED_PSE 0 |
46 | #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) | 52 | #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) |
@@ -67,10 +73,10 @@ | |||
67 | #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) | 73 | #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
68 | 74 | ||
69 | #define REQUIRED_MASK2 0 | 75 | #define REQUIRED_MASK2 0 |
70 | #define REQUIRED_MASK3 0 | 76 | #define REQUIRED_MASK3 (NEED_NOPL) |
71 | #define REQUIRED_MASK4 0 | 77 | #define REQUIRED_MASK4 0 |
72 | #define REQUIRED_MASK5 0 | 78 | #define REQUIRED_MASK5 0 |
73 | #define REQUIRED_MASK6 0 | 79 | #define REQUIRED_MASK6 0 |
74 | #define REQUIRED_MASK7 0 | 80 | #define REQUIRED_MASK7 0 |
75 | 81 | ||
76 | #endif | 82 | #endif /* ASM_X86__REQUIRED_FEATURES_H */ |
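REQUIRED_MASK3 now includes NEED_NOPL, so kernels built with X86_P6_NOP or for 64-bit require the long-NOP (NOPL) instruction at boot. X86_FEATURE_NOPL sits in capability word 3, so the early requirement check amounts to a mask test along the lines of this simplified sketch (the real check lives in the early CPU verification code):

#include <asm/processor.h>
#include <asm/required-features.h>

/* Every bit set in REQUIRED_MASK3 must also be present in the CPU's
 * capability word 3, otherwise this kernel cannot run on the CPU. */
static int cpu_meets_required_word3(const struct cpuinfo_x86 *c)
{
	return (c->x86_capability[3] & REQUIRED_MASK3) == REQUIRED_MASK3;
}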
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 8d9f0b41ee86..519a8ecbfc95 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_RESUME_TRACE_H | 1 | #ifndef ASM_X86__RESUME_TRACE_H |
2 | #define _ASM_X86_RESUME_TRACE_H | 2 | #define ASM_X86__RESUME_TRACE_H |
3 | 3 | ||
4 | #include <asm/asm.h> | 4 | #include <asm/asm.h> |
5 | 5 | ||
@@ -18,4 +18,4 @@ do { \ | |||
18 | } \ | 18 | } \ |
19 | } while (0) | 19 | } while (0) |
20 | 20 | ||
21 | #endif | 21 | #endif /* ASM_X86__RESUME_TRACE_H */ |
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index c9448bd8968f..5e1256bdee83 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h | |||
@@ -5,8 +5,8 @@ | |||
5 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> | 5 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef __ASM_RIO_H | 8 | #ifndef ASM_X86__RIO_H |
9 | #define __ASM_RIO_H | 9 | #define ASM_X86__RIO_H |
10 | 10 | ||
11 | #define RIO_TABLE_VERSION 3 | 11 | #define RIO_TABLE_VERSION 3 |
12 | 12 | ||
@@ -60,4 +60,4 @@ enum { | |||
60 | ALT_CALGARY = 5, /* Second Planar Calgary */ | 60 | ALT_CALGARY = 5, /* Second Planar Calgary */ |
61 | }; | 61 | }; |
62 | 62 | ||
63 | #endif /* __ASM_RIO_H */ | 63 | #endif /* ASM_X86__RIO_H */ |
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h index 6a8c0d645108..48a3109e1a7d 100644 --- a/include/asm-x86/rwlock.h +++ b/include/asm-x86/rwlock.h | |||
@@ -1,8 +1,8 @@ | |||
1 | #ifndef _ASM_X86_RWLOCK_H | 1 | #ifndef ASM_X86__RWLOCK_H |
2 | #define _ASM_X86_RWLOCK_H | 2 | #define ASM_X86__RWLOCK_H |
3 | 3 | ||
4 | #define RW_LOCK_BIAS 0x01000000 | 4 | #define RW_LOCK_BIAS 0x01000000 |
5 | 5 | ||
6 | /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ | 6 | /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ |
7 | 7 | ||
8 | #endif /* _ASM_X86_RWLOCK_H */ | 8 | #endif /* ASM_X86__RWLOCK_H */ |
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 750f2a3542b3..3ff3015b71a8 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h | |||
@@ -29,8 +29,8 @@ | |||
29 | * front, then they'll all be woken up, but no other readers will be. | 29 | * front, then they'll all be woken up, but no other readers will be. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #ifndef _I386_RWSEM_H | 32 | #ifndef ASM_X86__RWSEM_H |
33 | #define _I386_RWSEM_H | 33 | #define ASM_X86__RWSEM_H |
34 | 34 | ||
35 | #ifndef _LINUX_RWSEM_H | 35 | #ifndef _LINUX_RWSEM_H |
36 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" | 36 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" |
@@ -262,4 +262,4 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) | |||
262 | } | 262 | } |
263 | 263 | ||
264 | #endif /* __KERNEL__ */ | 264 | #endif /* __KERNEL__ */ |
265 | #endif /* _I386_RWSEM_H */ | 265 | #endif /* ASM_X86__RWSEM_H */ |
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h index c0432061f81a..ee48f880005d 100644 --- a/include/asm-x86/scatterlist.h +++ b/include/asm-x86/scatterlist.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SCATTERLIST_H | 1 | #ifndef ASM_X86__SCATTERLIST_H |
2 | #define _ASM_X86_SCATTERLIST_H | 2 | #define ASM_X86__SCATTERLIST_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | 5 | ||
@@ -30,4 +30,4 @@ struct scatterlist { | |||
30 | # define sg_dma_len(sg) ((sg)->dma_length) | 30 | # define sg_dma_len(sg) ((sg)->dma_length) |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #endif | 33 | #endif /* ASM_X86__SCATTERLIST_H */ |
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h index 36e71c5f306f..cf9ab2dbcef1 100644 --- a/include/asm-x86/seccomp_32.h +++ b/include/asm-x86/seccomp_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_SECCOMP_H | 1 | #ifndef ASM_X86__SECCOMP_32_H |
2 | #define _ASM_SECCOMP_H | 2 | #define ASM_X86__SECCOMP_32_H |
3 | 3 | ||
4 | #include <linux/thread_info.h> | 4 | #include <linux/thread_info.h> |
5 | 5 | ||
@@ -14,4 +14,4 @@ | |||
14 | #define __NR_seccomp_exit __NR_exit | 14 | #define __NR_seccomp_exit __NR_exit |
15 | #define __NR_seccomp_sigreturn __NR_sigreturn | 15 | #define __NR_seccomp_sigreturn __NR_sigreturn |
16 | 16 | ||
17 | #endif /* _ASM_SECCOMP_H */ | 17 | #endif /* ASM_X86__SECCOMP_32_H */ |
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h index 76cfe69aa63c..03274cea751f 100644 --- a/include/asm-x86/seccomp_64.h +++ b/include/asm-x86/seccomp_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_SECCOMP_H | 1 | #ifndef ASM_X86__SECCOMP_64_H |
2 | #define _ASM_SECCOMP_H | 2 | #define ASM_X86__SECCOMP_64_H |
3 | 3 | ||
4 | #include <linux/thread_info.h> | 4 | #include <linux/thread_info.h> |
5 | 5 | ||
@@ -22,4 +22,4 @@ | |||
22 | #define __NR_seccomp_exit_32 __NR_ia32_exit | 22 | #define __NR_seccomp_exit_32 __NR_ia32_exit |
23 | #define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn | 23 | #define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn |
24 | 24 | ||
25 | #endif /* _ASM_SECCOMP_H */ | 25 | #endif /* ASM_X86__SECCOMP_64_H */ |
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index 646452ea9ea3..ea5f0a8686f7 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SEGMENT_H_ | 1 | #ifndef ASM_X86__SEGMENT_H |
2 | #define _ASM_X86_SEGMENT_H_ | 2 | #define ASM_X86__SEGMENT_H |
3 | 3 | ||
4 | /* Constructor for a conventional segment GDT (or LDT) entry */ | 4 | /* Constructor for a conventional segment GDT (or LDT) entry */ |
5 | /* This is a macro so it can be used in initializers */ | 5 | /* This is a macro so it can be used in initializers */ |
@@ -212,4 +212,4 @@ extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; | |||
212 | #endif | 212 | #endif |
213 | #endif | 213 | #endif |
214 | 214 | ||
215 | #endif | 215 | #endif /* ASM_X86__SEGMENT_H */ |
diff --git a/include/asm-x86/sembuf.h b/include/asm-x86/sembuf.h index ee50c801f7b7..81f06b7e5a3f 100644 --- a/include/asm-x86/sembuf.h +++ b/include/asm-x86/sembuf.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SEMBUF_H | 1 | #ifndef ASM_X86__SEMBUF_H |
2 | #define _ASM_X86_SEMBUF_H | 2 | #define ASM_X86__SEMBUF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The semid64_ds structure for x86 architecture. | 5 | * The semid64_ds structure for x86 architecture. |
@@ -21,4 +21,4 @@ struct semid64_ds { | |||
21 | unsigned long __unused4; | 21 | unsigned long __unused4; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | #endif /* _ASM_X86_SEMBUF_H */ | 24 | #endif /* ASM_X86__SEMBUF_H */ |
diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h index 628c801535ea..303660b671e5 100644 --- a/include/asm-x86/serial.h +++ b/include/asm-x86/serial.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SERIAL_H | 1 | #ifndef ASM_X86__SERIAL_H |
2 | #define _ASM_X86_SERIAL_H | 2 | #define ASM_X86__SERIAL_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This assumes you have a 1.8432 MHz clock for your UART. | 5 | * This assumes you have a 1.8432 MHz clock for your UART. |
@@ -26,4 +26,4 @@ | |||
26 | { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ | 26 | { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ |
27 | { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ | 27 | { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ |
28 | 28 | ||
29 | #endif /* _ASM_X86_SERIAL_H */ | 29 | #endif /* ASM_X86__SERIAL_H */ |
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index a07c6f1c01e1..11b6cc14b289 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SETUP_H | 1 | #ifndef ASM_X86__SETUP_H |
2 | #define _ASM_X86_SETUP_H | 2 | #define ASM_X86__SETUP_H |
3 | 3 | ||
4 | #define COMMAND_LINE_SIZE 2048 | 4 | #define COMMAND_LINE_SIZE 2048 |
5 | 5 | ||
@@ -38,9 +38,11 @@ struct x86_quirks { | |||
38 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *m); | 38 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *m); |
39 | void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, | 39 | void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, |
40 | unsigned short oemsize); | 40 | unsigned short oemsize); |
41 | int (*setup_ioapic_ids)(void); | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | extern struct x86_quirks *x86_quirks; | 44 | extern struct x86_quirks *x86_quirks; |
45 | extern unsigned long saved_video_mode; | ||
44 | 46 | ||
45 | #ifndef CONFIG_PARAVIRT | 47 | #ifndef CONFIG_PARAVIRT |
46 | #define paravirt_post_allocator_init() do {} while (0) | 48 | #define paravirt_post_allocator_init() do {} while (0) |
@@ -100,4 +102,4 @@ void __init x86_64_start_reservations(char *real_mode_data); | |||
100 | #endif /* __ASSEMBLY__ */ | 102 | #endif /* __ASSEMBLY__ */ |
101 | #endif /* __KERNEL__ */ | 103 | #endif /* __KERNEL__ */ |
102 | 104 | ||
103 | #endif /* _ASM_X86_SETUP_H */ | 105 | #endif /* ASM_X86__SETUP_H */ |
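struct x86_quirks gains a setup_ioapic_ids() hook, and saved_video_mode becomes visible to other code. A hypothetical wiring of the new hook is sketched below; the callback name and its return-value convention (nonzero meaning "the platform already fixed up IO-APIC IDs") are assumptions, not taken from this patch:

#include <asm/setup.h>

/* Hypothetical platform callback; see the note above about the assumed
 * meaning of the return value. */
static int example_setup_ioapic_ids(void)
{
	return 1;
}

static struct x86_quirks example_quirks = {
	.setup_ioapic_ids	= example_setup_ioapic_ids,
};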
diff --git a/include/asm-x86/shmbuf.h b/include/asm-x86/shmbuf.h index b51413b74971..f51aec2298e9 100644 --- a/include/asm-x86/shmbuf.h +++ b/include/asm-x86/shmbuf.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SHMBUF_H | 1 | #ifndef ASM_X86__SHMBUF_H |
2 | #define _ASM_X86_SHMBUF_H | 2 | #define ASM_X86__SHMBUF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The shmid64_ds structure for x86 architecture. | 5 | * The shmid64_ds structure for x86 architecture. |
@@ -48,4 +48,4 @@ struct shminfo64 { | |||
48 | unsigned long __unused4; | 48 | unsigned long __unused4; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | #endif /* _ASM_X86_SHMBUF_H */ | 51 | #endif /* ASM_X86__SHMBUF_H */ |
diff --git a/include/asm-x86/shmparam.h b/include/asm-x86/shmparam.h index 0880cf0917b9..a83a1fd96a0e 100644 --- a/include/asm-x86/shmparam.h +++ b/include/asm-x86/shmparam.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #ifndef _ASM_X86_SHMPARAM_H | 1 | #ifndef ASM_X86__SHMPARAM_H |
2 | #define _ASM_X86_SHMPARAM_H | 2 | #define ASM_X86__SHMPARAM_H |
3 | 3 | ||
4 | #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ | 4 | #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ |
5 | 5 | ||
6 | #endif /* _ASM_X86_SHMPARAM_H */ | 6 | #endif /* ASM_X86__SHMPARAM_H */ |
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index 2f9c884d2c0f..ee813f4fe5d5 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h | |||
@@ -1,9 +1,43 @@ | |||
1 | #ifndef _ASM_X86_SIGCONTEXT_H | 1 | #ifndef ASM_X86__SIGCONTEXT_H |
2 | #define _ASM_X86_SIGCONTEXT_H | 2 | #define ASM_X86__SIGCONTEXT_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/types.h> | 5 | #include <asm/types.h> |
6 | 6 | ||
7 | #define FP_XSTATE_MAGIC1 0x46505853U | ||
8 | #define FP_XSTATE_MAGIC2 0x46505845U | ||
9 | #define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) | ||
10 | |||
11 | /* | ||
12 | * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame | ||
13 | * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes | ||
14 | * are used to extend the fpstate pointer in the sigcontext, which now | ||
15 | * includes the extended state information along with fpstate information. | ||
16 | * | ||
17 | * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved | ||
18 | * area and FP_XSTATE_MAGIC2 at the end of memory layout | ||
19 | * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the | ||
20 | * extended state information in the memory layout pointed by the fpstate | ||
21 | * pointer in sigcontext. | ||
22 | */ | ||
23 | struct _fpx_sw_bytes { | ||
24 | __u32 magic1; /* FP_XSTATE_MAGIC1 */ | ||
25 | __u32 extended_size; /* total size of the layout referred by | ||
26 | * fpstate pointer in the sigcontext. | ||
27 | */ | ||
28 | __u64 xstate_bv; | ||
29 | /* feature bit mask (including fp/sse/extended | ||
30 | * state) that is present in the memory | ||
31 | * layout. | ||
32 | */ | ||
33 | __u32 xstate_size; /* actual xsave state size, based on the | ||
34 | * features saved in the layout. | ||
35 | * 'extended_size' will be greater than | ||
36 | * 'xstate_size'. | ||
37 | */ | ||
38 | __u32 padding[7]; /* for future use. */ | ||
39 | }; | ||
40 | |||
7 | #ifdef __i386__ | 41 | #ifdef __i386__ |
8 | /* | 42 | /* |
9 | * As documented in the iBCS2 standard.. | 43 | * As documented in the iBCS2 standard.. |
@@ -53,7 +87,13 @@ struct _fpstate { | |||
53 | unsigned long reserved; | 87 | unsigned long reserved; |
54 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ | 88 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
55 | struct _xmmreg _xmm[8]; | 89 | struct _xmmreg _xmm[8]; |
56 | unsigned long padding[56]; | 90 | unsigned long padding1[44]; |
91 | |||
92 | union { | ||
93 | unsigned long padding2[12]; | ||
94 | struct _fpx_sw_bytes sw_reserved; /* represents the extended | ||
95 | * state info */ | ||
96 | }; | ||
57 | }; | 97 | }; |
58 | 98 | ||
59 | #define X86_FXSR_MAGIC 0x0000 | 99 | #define X86_FXSR_MAGIC 0x0000 |
@@ -79,7 +119,15 @@ struct sigcontext { | |||
79 | unsigned long flags; | 119 | unsigned long flags; |
80 | unsigned long sp_at_signal; | 120 | unsigned long sp_at_signal; |
81 | unsigned short ss, __ssh; | 121 | unsigned short ss, __ssh; |
82 | struct _fpstate __user *fpstate; | 122 | |
123 | /* | ||
124 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | ||
125 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | ||
126 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | ||
127 | * of the extended memory layout. See comments at the definition of | ||
128 | * (struct _fpx_sw_bytes) | ||
129 | */ | ||
130 | void __user *fpstate; /* zero when no FPU/extended context */ | ||
83 | unsigned long oldmask; | 131 | unsigned long oldmask; |
84 | unsigned long cr2; | 132 | unsigned long cr2; |
85 | }; | 133 | }; |
@@ -130,7 +178,12 @@ struct _fpstate { | |||
130 | __u32 mxcsr_mask; | 178 | __u32 mxcsr_mask; |
131 | __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ | 179 | __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ |
132 | __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ | 180 | __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ |
133 | __u32 reserved2[24]; | 181 | __u32 reserved2[12]; |
182 | union { | ||
183 | __u32 reserved3[12]; | ||
184 | struct _fpx_sw_bytes sw_reserved; /* represents the extended | ||
185 | * state information */ | ||
186 | }; | ||
134 | }; | 187 | }; |
135 | 188 | ||
136 | #ifdef __KERNEL__ | 189 | #ifdef __KERNEL__ |
@@ -161,7 +214,15 @@ struct sigcontext { | |||
161 | unsigned long trapno; | 214 | unsigned long trapno; |
162 | unsigned long oldmask; | 215 | unsigned long oldmask; |
163 | unsigned long cr2; | 216 | unsigned long cr2; |
164 | struct _fpstate __user *fpstate; /* zero when no FPU context */ | 217 | |
218 | /* | ||
219 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | ||
220 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | ||
221 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | ||
222 | * of the extended memory layout. See comments at the definition of | ||
223 | * (struct _fpx_sw_bytes) | ||
224 | */ | ||
225 | void __user *fpstate; /* zero when no FPU/extended context */ | ||
165 | unsigned long reserved1[8]; | 226 | unsigned long reserved1[8]; |
166 | }; | 227 | }; |
167 | #else /* __KERNEL__ */ | 228 | #else /* __KERNEL__ */ |
@@ -202,4 +263,22 @@ struct sigcontext { | |||
202 | 263 | ||
203 | #endif /* !__i386__ */ | 264 | #endif /* !__i386__ */ |
204 | 265 | ||
205 | #endif | 266 | struct _xsave_hdr { |
267 | __u64 xstate_bv; | ||
268 | __u64 reserved1[2]; | ||
269 | __u64 reserved2[5]; | ||
270 | }; | ||
271 | |||
272 | /* | ||
273 | * Extended state pointed by the fpstate pointer in the sigcontext. | ||
274 | * In addition to the fpstate, information encoded in the xstate_hdr | ||
275 | * indicates the presence of other extended state information | ||
276 | * supported by the processor and OS. | ||
277 | */ | ||
278 | struct _xstate { | ||
279 | struct _fpstate fpstate; | ||
280 | struct _xsave_hdr xstate_hdr; | ||
281 | /* new processor state extensions go here */ | ||
282 | }; | ||
283 | |||
284 | #endif /* ASM_X86__SIGCONTEXT_H */ | ||
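The comments added above describe a detection protocol rather than a single new field, so a short sketch may make it concrete. Under the layout introduced here, a consumer of a 64-bit signal frame decides whether fpstate points at a plain struct _fpstate or at the larger struct _xstate by checking the two magic values. The helper name below is hypothetical; the sketch assumes the definitions from this header and kernel-style C (anonymous-union member access), not glibc's ucontext types.

	#include <stdint.h>
	#include <asm/sigcontext.h>

	/* Illustrative only: given the fpstate pointer from a 64-bit sigcontext,
	 * return the extended state area, or NULL if the frame carries no FPU
	 * context or only the legacy fxsave layout. */
	static struct _xstate *fpstate_to_xstate(void *fpstate)
	{
		struct _fpstate *fp = fpstate;
		uint32_t *magic2;

		if (!fp)
			return NULL;			/* no FPU context at all */

		if (fp->sw_reserved.magic1 != FP_XSTATE_MAGIC1)
			return NULL;			/* legacy fxsave-only frame */

		/* FP_XSTATE_MAGIC2 sits at the very end of the extended layout. */
		magic2 = (uint32_t *)((char *)fp +
				      fp->sw_reserved.extended_size -
				      FP_XSTATE_MAGIC2_SIZE);
		if (*magic2 != FP_XSTATE_MAGIC2)
			return NULL;

		return (struct _xstate *)fp;		/* fpstate really is a _xstate */
	}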
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 57a9686fb491..8c347032c2f2 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _SIGCONTEXT32_H | 1 | #ifndef ASM_X86__SIGCONTEXT32_H |
2 | #define _SIGCONTEXT32_H 1 | 2 | #define ASM_X86__SIGCONTEXT32_H |
3 | 3 | ||
4 | /* signal context for 32bit programs. */ | 4 | /* signal context for 32bit programs. */ |
5 | 5 | ||
@@ -40,7 +40,11 @@ struct _fpstate_ia32 { | |||
40 | __u32 reserved; | 40 | __u32 reserved; |
41 | struct _fpxreg _fxsr_st[8]; | 41 | struct _fpxreg _fxsr_st[8]; |
42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ | 42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ |
43 | __u32 padding[56]; | 43 | __u32 padding[44]; |
44 | union { | ||
45 | __u32 padding2[12]; | ||
46 | struct _fpx_sw_bytes sw_reserved; | ||
47 | }; | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | struct sigcontext_ia32 { | 50 | struct sigcontext_ia32 { |
@@ -68,4 +72,4 @@ struct sigcontext_ia32 { | |||
68 | unsigned int cr2; | 72 | unsigned int cr2; |
69 | }; | 73 | }; |
70 | 74 | ||
71 | #endif | 75 | #endif /* ASM_X86__SIGCONTEXT32_H */ |
diff --git a/include/asm-x86/siginfo.h b/include/asm-x86/siginfo.h index a477bea0c2a1..808bdfb2958c 100644 --- a/include/asm-x86/siginfo.h +++ b/include/asm-x86/siginfo.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SIGINFO_H | 1 | #ifndef ASM_X86__SIGINFO_H |
2 | #define _ASM_X86_SIGINFO_H | 2 | #define ASM_X86__SIGINFO_H |
3 | 3 | ||
4 | #ifdef __x86_64__ | 4 | #ifdef __x86_64__ |
5 | # define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) | 5 | # define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) |
@@ -7,4 +7,4 @@ | |||
7 | 7 | ||
8 | #include <asm-generic/siginfo.h> | 8 | #include <asm-generic/siginfo.h> |
9 | 9 | ||
10 | #endif | 10 | #endif /* ASM_X86__SIGINFO_H */ |
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index 6dac49364e95..65acc82d267a 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SIGNAL_H | 1 | #ifndef ASM_X86__SIGNAL_H |
2 | #define _ASM_X86_SIGNAL_H | 2 | #define ASM_X86__SIGNAL_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
@@ -140,6 +140,9 @@ struct sigaction { | |||
140 | struct k_sigaction { | 140 | struct k_sigaction { |
141 | struct sigaction sa; | 141 | struct sigaction sa; |
142 | }; | 142 | }; |
143 | |||
144 | extern void do_notify_resume(struct pt_regs *, void *, __u32); | ||
145 | |||
143 | # else /* __KERNEL__ */ | 146 | # else /* __KERNEL__ */ |
144 | /* Here we must cater to libcs that poke about in kernel headers. */ | 147 | /* Here we must cater to libcs that poke about in kernel headers. */ |
145 | 148 | ||
@@ -256,4 +259,4 @@ struct pt_regs; | |||
256 | #endif /* __KERNEL__ */ | 259 | #endif /* __KERNEL__ */ |
257 | #endif /* __ASSEMBLY__ */ | 260 | #endif /* __ASSEMBLY__ */ |
258 | 261 | ||
259 | #endif | 262 | #endif /* ASM_X86__SIGNAL_H */ |
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 3c877f74f279..29324c103341 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SMP_H_ | 1 | #ifndef ASM_X86__SMP_H |
2 | #define _ASM_X86_SMP_H_ | 2 | #define ASM_X86__SMP_H |
3 | #ifndef __ASSEMBLY__ | 3 | #ifndef __ASSEMBLY__ |
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | #include <linux/init.h> | 5 | #include <linux/init.h> |
@@ -34,6 +34,9 @@ extern cpumask_t cpu_initialized; | |||
34 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | 34 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); |
35 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | 35 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); |
36 | DECLARE_PER_CPU(u16, cpu_llc_id); | 36 | DECLARE_PER_CPU(u16, cpu_llc_id); |
37 | #ifdef CONFIG_X86_32 | ||
38 | DECLARE_PER_CPU(int, cpu_number); | ||
39 | #endif | ||
37 | 40 | ||
38 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); | 41 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); |
39 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); | 42 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); |
@@ -142,7 +145,6 @@ extern unsigned disabled_cpus __cpuinitdata; | |||
142 | * from the initial startup. We map APIC_BASE very early in page_setup(), | 145 | * from the initial startup. We map APIC_BASE very early in page_setup(), |
143 | * so this is correct in the x86 case. | 146 | * so this is correct in the x86 case. |
144 | */ | 147 | */ |
145 | DECLARE_PER_CPU(int, cpu_number); | ||
146 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) | 148 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) |
147 | extern int safe_smp_processor_id(void); | 149 | extern int safe_smp_processor_id(void); |
148 | 150 | ||
@@ -165,30 +167,33 @@ extern int safe_smp_processor_id(void); | |||
165 | 167 | ||
166 | #ifdef CONFIG_X86_LOCAL_APIC | 168 | #ifdef CONFIG_X86_LOCAL_APIC |
167 | 169 | ||
170 | #ifndef CONFIG_X86_64 | ||
168 | static inline int logical_smp_processor_id(void) | 171 | static inline int logical_smp_processor_id(void) |
169 | { | 172 | { |
170 | /* we don't want to mark this access volatile - bad code generation */ | 173 | /* we don't want to mark this access volatile - bad code generation */ |
171 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | 174 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); |
172 | } | 175 | } |
173 | 176 | ||
174 | #ifndef CONFIG_X86_64 | 177 | #include <mach_apicdef.h> |
175 | static inline unsigned int read_apic_id(void) | 178 | static inline unsigned int read_apic_id(void) |
176 | { | 179 | { |
177 | return *(u32 *)(APIC_BASE + APIC_ID); | 180 | unsigned int reg; |
181 | |||
182 | reg = *(u32 *)(APIC_BASE + APIC_ID); | ||
183 | |||
184 | return GET_APIC_ID(reg); | ||
178 | } | 185 | } |
179 | #else | ||
180 | extern unsigned int read_apic_id(void); | ||
181 | #endif | 186 | #endif |
182 | 187 | ||
183 | 188 | ||
184 | # ifdef APIC_DEFINITION | 189 | # if defined(APIC_DEFINITION) || defined(CONFIG_X86_64) |
185 | extern int hard_smp_processor_id(void); | 190 | extern int hard_smp_processor_id(void); |
186 | # else | 191 | # else |
187 | # include <mach_apicdef.h> | 192 | #include <mach_apicdef.h> |
188 | static inline int hard_smp_processor_id(void) | 193 | static inline int hard_smp_processor_id(void) |
189 | { | 194 | { |
190 | /* we don't want to mark this access volatile - bad code generation */ | 195 | /* we don't want to mark this access volatile - bad code generation */ |
191 | return GET_APIC_ID(read_apic_id()); | 196 | return read_apic_id(); |
192 | } | 197 | } |
193 | # endif /* APIC_DEFINITION */ | 198 | # endif /* APIC_DEFINITION */ |
194 | 199 | ||
@@ -205,4 +210,4 @@ extern void cpu_uninit(void); | |||
205 | #endif | 210 | #endif |
206 | 211 | ||
207 | #endif /* __ASSEMBLY__ */ | 212 | #endif /* __ASSEMBLY__ */ |
208 | #endif | 213 | #endif /* ASM_X86__SMP_H */ |
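The read_apic_id() hunk above is a contract change as much as a code move: the raw APIC_ID register is now run through the subarchitecture's get_apic_id() inside read_apic_id(), so callers receive the already-extracted ID. A sketch of the caller-visible difference; neither helper below is a real kernel symbol:

	/* Before this series, read_apic_id() returned the raw APIC_ID register,
	 * so callers extracted the ID field themselves: */
	static inline unsigned int apic_id_old_contract(void)
	{
		return GET_APIC_ID(read_apic_id());
	}

	/* After it, the extraction happens inside read_apic_id(), which is why
	 * hard_smp_processor_id() above collapses to a plain call: */
	static inline unsigned int apic_id_new_contract(void)
	{
		return read_apic_id();
	}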
diff --git a/include/asm-x86/socket.h b/include/asm-x86/socket.h index 80af9c4ccad7..db73274c83c3 100644 --- a/include/asm-x86/socket.h +++ b/include/asm-x86/socket.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_SOCKET_H | 1 | #ifndef ASM_X86__SOCKET_H |
2 | #define _ASM_SOCKET_H | 2 | #define ASM_X86__SOCKET_H |
3 | 3 | ||
4 | #include <asm/sockios.h> | 4 | #include <asm/sockios.h> |
5 | 5 | ||
@@ -54,4 +54,4 @@ | |||
54 | 54 | ||
55 | #define SO_MARK 36 | 55 | #define SO_MARK 36 |
56 | 56 | ||
57 | #endif /* _ASM_SOCKET_H */ | 57 | #endif /* ASM_X86__SOCKET_H */ |
diff --git a/include/asm-x86/sockios.h b/include/asm-x86/sockios.h index 49cc72b5d3c9..a006704fdc84 100644 --- a/include/asm-x86/sockios.h +++ b/include/asm-x86/sockios.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SOCKIOS_H | 1 | #ifndef ASM_X86__SOCKIOS_H |
2 | #define _ASM_X86_SOCKIOS_H | 2 | #define ASM_X86__SOCKIOS_H |
3 | 3 | ||
4 | /* Socket-level I/O control calls. */ | 4 | /* Socket-level I/O control calls. */ |
5 | #define FIOSETOWN 0x8901 | 5 | #define FIOSETOWN 0x8901 |
@@ -10,4 +10,4 @@ | |||
10 | #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ | 10 | #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ |
11 | #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ | 11 | #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ |
12 | 12 | ||
13 | #endif /* _ASM_X86_SOCKIOS_H */ | 13 | #endif /* ASM_X86__SOCKIOS_H */ |
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index 9bd48b0a534b..38f8e6bc3186 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SPARSEMEM_H | 1 | #ifndef ASM_X86__SPARSEMEM_H |
2 | #define _ASM_X86_SPARSEMEM_H | 2 | #define ASM_X86__SPARSEMEM_H |
3 | 3 | ||
4 | #ifdef CONFIG_SPARSEMEM | 4 | #ifdef CONFIG_SPARSEMEM |
5 | /* | 5 | /* |
@@ -31,4 +31,4 @@ | |||
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #endif /* CONFIG_SPARSEMEM */ | 33 | #endif /* CONFIG_SPARSEMEM */ |
34 | #endif | 34 | #endif /* ASM_X86__SPARSEMEM_H */ |
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index e39c790dbfd2..5d08fa280fdf 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_SPINLOCK_H_ | 1 | #ifndef ASM_X86__SPINLOCK_H |
2 | #define _X86_SPINLOCK_H_ | 2 | #define ASM_X86__SPINLOCK_H |
3 | 3 | ||
4 | #include <asm/atomic.h> | 4 | #include <asm/atomic.h> |
5 | #include <asm/rwlock.h> | 5 | #include <asm/rwlock.h> |
@@ -366,4 +366,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
366 | #define _raw_read_relax(lock) cpu_relax() | 366 | #define _raw_read_relax(lock) cpu_relax() |
367 | #define _raw_write_relax(lock) cpu_relax() | 367 | #define _raw_write_relax(lock) cpu_relax() |
368 | 368 | ||
369 | #endif | 369 | #endif /* ASM_X86__SPINLOCK_H */ |
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h index 06c071c9eee9..6aa9b562c508 100644 --- a/include/asm-x86/spinlock_types.h +++ b/include/asm-x86/spinlock_types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | 1 | #ifndef ASM_X86__SPINLOCK_TYPES_H |
2 | #define __ASM_SPINLOCK_TYPES_H | 2 | #define ASM_X86__SPINLOCK_TYPES_H |
3 | 3 | ||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | 4 | #ifndef __LINUX_SPINLOCK_TYPES_H |
5 | # error "please don't include this file directly" | 5 | # error "please don't include this file directly" |
@@ -17,4 +17,4 @@ typedef struct { | |||
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
19 | 19 | ||
20 | #endif | 20 | #endif /* ASM_X86__SPINLOCK_TYPES_H */ |
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h index 774c919dc232..5363e4f7e1cd 100644 --- a/include/asm-x86/srat.h +++ b/include/asm-x86/srat.h | |||
@@ -24,8 +24,8 @@ | |||
24 | * Send feedback to Pat Gaughen <gone@us.ibm.com> | 24 | * Send feedback to Pat Gaughen <gone@us.ibm.com> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifndef _ASM_SRAT_H_ | 27 | #ifndef ASM_X86__SRAT_H |
28 | #define _ASM_SRAT_H_ | 28 | #define ASM_X86__SRAT_H |
29 | 29 | ||
30 | #ifdef CONFIG_ACPI_NUMA | 30 | #ifdef CONFIG_ACPI_NUMA |
31 | extern int get_memcfg_from_srat(void); | 31 | extern int get_memcfg_from_srat(void); |
@@ -36,4 +36,4 @@ static inline int get_memcfg_from_srat(void) | |||
36 | } | 36 | } |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #endif /* _ASM_SRAT_H_ */ | 39 | #endif /* ASM_X86__SRAT_H */ |
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h index 30f82526a8e2..f43517e28532 100644 --- a/include/asm-x86/stacktrace.h +++ b/include/asm-x86/stacktrace.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_STACKTRACE_H | 1 | #ifndef ASM_X86__STACKTRACE_H |
2 | #define _ASM_STACKTRACE_H 1 | 2 | #define ASM_X86__STACKTRACE_H |
3 | 3 | ||
4 | extern int kstack_depth_to_print; | 4 | extern int kstack_depth_to_print; |
5 | 5 | ||
@@ -18,4 +18,4 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, | |||
18 | unsigned long *stack, unsigned long bp, | 18 | unsigned long *stack, unsigned long bp, |
19 | const struct stacktrace_ops *ops, void *data); | 19 | const struct stacktrace_ops *ops, void *data); |
20 | 20 | ||
21 | #endif | 21 | #endif /* ASM_X86__STACKTRACE_H */ |
diff --git a/include/asm-x86/stat.h b/include/asm-x86/stat.h index 5c22dcb5d17e..1e120f628905 100644 --- a/include/asm-x86/stat.h +++ b/include/asm-x86/stat.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_STAT_H | 1 | #ifndef ASM_X86__STAT_H |
2 | #define _ASM_X86_STAT_H | 2 | #define ASM_X86__STAT_H |
3 | 3 | ||
4 | #define STAT_HAVE_NSEC 1 | 4 | #define STAT_HAVE_NSEC 1 |
5 | 5 | ||
@@ -111,4 +111,4 @@ struct __old_kernel_stat { | |||
111 | #endif | 111 | #endif |
112 | }; | 112 | }; |
113 | 113 | ||
114 | #endif | 114 | #endif /* ASM_X86__STAT_H */ |
diff --git a/include/asm-x86/statfs.h b/include/asm-x86/statfs.h index 7c651aa97252..3f005bc3aa5b 100644 --- a/include/asm-x86/statfs.h +++ b/include/asm-x86/statfs.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_STATFS_H | 1 | #ifndef ASM_X86__STATFS_H |
2 | #define _ASM_X86_STATFS_H | 2 | #define ASM_X86__STATFS_H |
3 | 3 | ||
4 | #ifdef __i386__ | 4 | #ifdef __i386__ |
5 | #include <asm-generic/statfs.h> | 5 | #include <asm-generic/statfs.h> |
@@ -60,4 +60,4 @@ struct compat_statfs64 { | |||
60 | } __attribute__((packed)); | 60 | } __attribute__((packed)); |
61 | 61 | ||
62 | #endif /* !__i386__ */ | 62 | #endif /* !__i386__ */ |
63 | #endif | 63 | #endif /* ASM_X86__STATFS_H */ |
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index 193578cd1fd9..487843ed245a 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_STRING_H_ | 1 | #ifndef ASM_X86__STRING_32_H |
2 | #define _I386_STRING_H_ | 2 | #define ASM_X86__STRING_32_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
@@ -323,4 +323,4 @@ extern void *memscan(void *addr, int c, size_t size); | |||
323 | 323 | ||
324 | #endif /* __KERNEL__ */ | 324 | #endif /* __KERNEL__ */ |
325 | 325 | ||
326 | #endif | 326 | #endif /* ASM_X86__STRING_32_H */ |
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index 52b5ab383395..a2add11d3b66 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_64_STRING_H_ | 1 | #ifndef ASM_X86__STRING_64_H |
2 | #define _X86_64_STRING_H_ | 2 | #define ASM_X86__STRING_64_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
@@ -57,4 +57,4 @@ int strcmp(const char *cs, const char *ct); | |||
57 | 57 | ||
58 | #endif /* __KERNEL__ */ | 58 | #endif /* __KERNEL__ */ |
59 | 59 | ||
60 | #endif | 60 | #endif /* ASM_X86__STRING_64_H */ |
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/summit/apic.h index c47e2ab5c5ca..c5b2e4b10358 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/summit/apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_SUMMIT_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_SUMMIT_APIC_H |
3 | 3 | ||
4 | #include <asm/smp.h> | 4 | #include <asm/smp.h> |
5 | 5 | ||
@@ -21,7 +21,7 @@ static inline cpumask_t target_cpus(void) | |||
21 | * Just start on cpu 0. IRQ balancing will spread load | 21 | * Just start on cpu 0. IRQ balancing will spread load |
22 | */ | 22 | */ |
23 | return cpumask_of_cpu(0); | 23 | return cpumask_of_cpu(0); |
24 | } | 24 | } |
25 | #define TARGET_CPUS (target_cpus()) | 25 | #define TARGET_CPUS (target_cpus()) |
26 | 26 | ||
27 | #define INT_DELIVERY_MODE (dest_LowestPrio) | 27 | #define INT_DELIVERY_MODE (dest_LowestPrio) |
@@ -30,10 +30,10 @@ static inline cpumask_t target_cpus(void) | |||
30 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | 30 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
31 | { | 31 | { |
32 | return 0; | 32 | return 0; |
33 | } | 33 | } |
34 | 34 | ||
35 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ | 35 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ |
36 | static inline unsigned long check_apicid_present(int bit) | 36 | static inline unsigned long check_apicid_present(int bit) |
37 | { | 37 | { |
38 | return 1; | 38 | return 1; |
39 | } | 39 | } |
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) | |||
122 | 122 | ||
123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | 123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) |
124 | { | 124 | { |
125 | return physid_mask_of_physid(apicid); | 125 | return physid_mask_of_physid(0); |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
@@ -143,22 +143,22 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
143 | int num_bits_set; | 143 | int num_bits_set; |
144 | int cpus_found = 0; | 144 | int cpus_found = 0; |
145 | int cpu; | 145 | int cpu; |
146 | int apicid; | 146 | int apicid; |
147 | 147 | ||
148 | num_bits_set = cpus_weight(cpumask); | 148 | num_bits_set = cpus_weight(cpumask); |
149 | /* Return id to all */ | 149 | /* Return id to all */ |
150 | if (num_bits_set == NR_CPUS) | 150 | if (num_bits_set == NR_CPUS) |
151 | return (int) 0xFF; | 151 | return (int) 0xFF; |
152 | /* | 152 | /* |
153 | * The cpus in the mask must all be on the apic cluster. If they are not | 153 | * The cpus in the mask must all be on the apic cluster. If they are not |
154 | * on the same apicid cluster, return the default value of TARGET_CPUS. | 154 | * on the same apicid cluster, return the default value of TARGET_CPUS. |
155 | */ | 155 | */ |
156 | cpu = first_cpu(cpumask); | 156 | cpu = first_cpu(cpumask); |
157 | apicid = cpu_to_logical_apicid(cpu); | 157 | apicid = cpu_to_logical_apicid(cpu); |
158 | while (cpus_found < num_bits_set) { | 158 | while (cpus_found < num_bits_set) { |
159 | if (cpu_isset(cpu, cpumask)) { | 159 | if (cpu_isset(cpu, cpumask)) { |
160 | int new_apicid = cpu_to_logical_apicid(cpu); | 160 | int new_apicid = cpu_to_logical_apicid(cpu); |
161 | if (apicid_cluster(apicid) != | 161 | if (apicid_cluster(apicid) != |
162 | apicid_cluster(new_apicid)){ | 162 | apicid_cluster(new_apicid)){ |
163 | printk ("%s: Not a valid mask!\n",__FUNCTION__); | 163 | printk ("%s: Not a valid mask!\n",__FUNCTION__); |
164 | return 0xFF; | 164 | return 0xFF; |
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
182 | return hard_smp_processor_id() >> index_msb; | 182 | return hard_smp_processor_id() >> index_msb; |
183 | } | 183 | } |
184 | 184 | ||
185 | #endif /* __ASM_MACH_APIC_H */ | 185 | #endif /* __ASM_SUMMIT_APIC_H */ |
diff --git a/include/asm-x86/summit/apicdef.h b/include/asm-x86/summit/apicdef.h new file mode 100644 index 000000000000..f3fbca1f61c1 --- /dev/null +++ b/include/asm-x86/summit/apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_SUMMIT_APICDEF_H | ||
2 | #define __ASM_SUMMIT_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (x>>24)&0xFF; | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/mach-summit/mach_ipi.h b/include/asm-x86/summit/ipi.h index 9404c535b7ec..53bd1e7bd7b4 100644 --- a/include/asm-x86/mach-summit/mach_ipi.h +++ b/include/asm-x86/summit/ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | 1 | #ifndef __ASM_SUMMIT_IPI_H |
2 | #define __ASM_MACH_IPI_H | 2 | #define __ASM_SUMMIT_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); |
5 | 5 | ||
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector) | |||
22 | send_IPI_mask(cpu_online_map, vector); | 22 | send_IPI_mask(cpu_online_map, vector); |
23 | } | 23 | } |
24 | 24 | ||
25 | #endif /* __ASM_MACH_IPI_H */ | 25 | #endif /* __ASM_SUMMIT_IPI_H */ |
diff --git a/include/asm-x86/mach-summit/irq_vectors_limits.h b/include/asm-x86/summit/irq_vectors_limits.h index 890ce3f5e09a..890ce3f5e09a 100644 --- a/include/asm-x86/mach-summit/irq_vectors_limits.h +++ b/include/asm-x86/summit/irq_vectors_limits.h | |||
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/summit/mpparse.h index fdf591701339..013ce6fab2d5 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/summit/mpparse.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_SUMMIT_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_SUMMIT_MPPARSE_H |
3 | 3 | ||
4 | #include <mach_apic.h> | ||
5 | #include <asm/tsc.h> | 4 | #include <asm/tsc.h> |
6 | 5 | ||
7 | extern int use_cyclone; | 6 | extern int use_cyclone; |
@@ -12,11 +11,11 @@ extern void setup_summit(void); | |||
12 | #define setup_summit() {} | 11 | #define setup_summit() {} |
13 | #endif | 12 | #endif |
14 | 13 | ||
15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 14 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
16 | char *productid) | 15 | char *productid) |
17 | { | 16 | { |
18 | if (!strncmp(oem, "IBM ENSW", 8) && | 17 | if (!strncmp(oem, "IBM ENSW", 8) && |
19 | (!strncmp(productid, "VIGIL SMP", 9) | 18 | (!strncmp(productid, "VIGIL SMP", 9) |
20 | || !strncmp(productid, "EXA", 3) | 19 | || !strncmp(productid, "EXA", 3) |
21 | || !strncmp(productid, "RUTHLESS SMP", 12))){ | 20 | || !strncmp(productid, "RUTHLESS SMP", 12))){ |
22 | mark_tsc_unstable("Summit based system"); | 21 | mark_tsc_unstable("Summit based system"); |
@@ -107,4 +106,4 @@ static inline int is_WPEG(struct rio_detail *rio){ | |||
107 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); | 106 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); |
108 | } | 107 | } |
109 | 108 | ||
110 | #endif /* __ASM_MACH_MPPARSE_H */ | 109 | #endif /* __ASM_SUMMIT_MPPARSE_H */ |
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index 8675c6782a7d..acb6d4d491f4 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h | |||
@@ -3,8 +3,8 @@ | |||
3 | * Based on code | 3 | * Based on code |
4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> | 4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> |
5 | */ | 5 | */ |
6 | #ifndef __ASM_X86_32_SUSPEND_H | 6 | #ifndef ASM_X86__SUSPEND_32_H |
7 | #define __ASM_X86_32_SUSPEND_H | 7 | #define ASM_X86__SUSPEND_32_H |
8 | 8 | ||
9 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
10 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
@@ -48,4 +48,4 @@ static inline void acpi_save_register_state(unsigned long return_point) | |||
48 | extern int acpi_save_state_mem(void); | 48 | extern int acpi_save_state_mem(void); |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #endif /* __ASM_X86_32_SUSPEND_H */ | 51 | #endif /* ASM_X86__SUSPEND_32_H */ |
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index dc3262b43072..cf821dd310e8 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h | |||
@@ -3,8 +3,8 @@ | |||
3 | * Based on code | 3 | * Based on code |
4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> | 4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> |
5 | */ | 5 | */ |
6 | #ifndef __ASM_X86_64_SUSPEND_H | 6 | #ifndef ASM_X86__SUSPEND_64_H |
7 | #define __ASM_X86_64_SUSPEND_H | 7 | #define ASM_X86__SUSPEND_64_H |
8 | 8 | ||
9 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
10 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
@@ -49,4 +49,4 @@ extern int acpi_save_state_mem(void); | |||
49 | extern char core_restore_code; | 49 | extern char core_restore_code; |
50 | extern char restore_registers; | 50 | extern char restore_registers; |
51 | 51 | ||
52 | #endif /* __ASM_X86_64_SUSPEND_H */ | 52 | #endif /* ASM_X86__SUSPEND_64_H */ |
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index 2730b351afcf..1e20adbcad4b 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_SWIOTLB_H | 1 | #ifndef ASM_X86__SWIOTLB_H |
2 | #define _ASM_SWIOTLB_H 1 | 2 | #define ASM_X86__SWIOTLB_H |
3 | 3 | ||
4 | #include <asm/dma-mapping.h> | 4 | #include <asm/dma-mapping.h> |
5 | 5 | ||
@@ -55,4 +55,4 @@ static inline void pci_swiotlb_init(void) | |||
55 | 55 | ||
56 | static inline void dma_mark_clean(void *addr, size_t size) {} | 56 | static inline void dma_mark_clean(void *addr, size_t size) {} |
57 | 57 | ||
58 | #endif /* _ASM_SWIOTLB_H */ | 58 | #endif /* ASM_X86__SWIOTLB_H */ |
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index b47a1d0b8a83..b689bee71104 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_SYNC_BITOPS_H | 1 | #ifndef ASM_X86__SYNC_BITOPS_H |
2 | #define _I386_SYNC_BITOPS_H | 2 | #define ASM_X86__SYNC_BITOPS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
@@ -127,4 +127,4 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) | |||
127 | 127 | ||
128 | #undef ADDR | 128 | #undef ADDR |
129 | 129 | ||
130 | #endif /* _I386_SYNC_BITOPS_H */ | 130 | #endif /* ASM_X86__SYNC_BITOPS_H */ |
diff --git a/include/asm-x86/syscalls.h b/include/asm-x86/syscalls.h new file mode 100644 index 000000000000..87803da44010 --- /dev/null +++ b/include/asm-x86/syscalls.h | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * syscalls.h - Linux syscall interfaces (arch-specific) | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * See the file COPYING for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_X86_SYSCALLS_H | ||
11 | #define _ASM_X86_SYSCALLS_H | ||
12 | |||
13 | #include <linux/compiler.h> | ||
14 | #include <linux/linkage.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/signal.h> | ||
17 | |||
18 | /* Common in X86_32 and X86_64 */ | ||
19 | /* kernel/ioport.c */ | ||
20 | asmlinkage long sys_ioperm(unsigned long, unsigned long, int); | ||
21 | |||
22 | /* X86_32 only */ | ||
23 | #ifdef CONFIG_X86_32 | ||
24 | /* kernel/process_32.c */ | ||
25 | asmlinkage int sys_fork(struct pt_regs); | ||
26 | asmlinkage int sys_clone(struct pt_regs); | ||
27 | asmlinkage int sys_vfork(struct pt_regs); | ||
28 | asmlinkage int sys_execve(struct pt_regs); | ||
29 | |||
30 | /* kernel/signal_32.c */ | ||
31 | asmlinkage int sys_sigsuspend(int, int, old_sigset_t); | ||
32 | asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, | ||
33 | struct old_sigaction __user *); | ||
34 | asmlinkage int sys_sigaltstack(unsigned long); | ||
35 | asmlinkage unsigned long sys_sigreturn(unsigned long); | ||
36 | asmlinkage int sys_rt_sigreturn(unsigned long); | ||
37 | |||
38 | /* kernel/ioport.c */ | ||
39 | asmlinkage long sys_iopl(unsigned long); | ||
40 | |||
41 | /* kernel/ldt.c */ | ||
42 | asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); | ||
43 | |||
44 | /* kernel/sys_i386_32.c */ | ||
45 | asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, | ||
46 | unsigned long, unsigned long, unsigned long); | ||
47 | struct mmap_arg_struct; | ||
48 | asmlinkage int old_mmap(struct mmap_arg_struct __user *); | ||
49 | struct sel_arg_struct; | ||
50 | asmlinkage int old_select(struct sel_arg_struct __user *); | ||
51 | asmlinkage int sys_ipc(uint, int, int, int, void __user *, long); | ||
52 | struct old_utsname; | ||
53 | asmlinkage int sys_uname(struct old_utsname __user *); | ||
54 | struct oldold_utsname; | ||
55 | asmlinkage int sys_olduname(struct oldold_utsname __user *); | ||
56 | |||
57 | /* kernel/tls.c */ | ||
58 | asmlinkage int sys_set_thread_area(struct user_desc __user *); | ||
59 | asmlinkage int sys_get_thread_area(struct user_desc __user *); | ||
60 | |||
61 | /* kernel/vm86_32.c */ | ||
62 | asmlinkage int sys_vm86old(struct pt_regs); | ||
63 | asmlinkage int sys_vm86(struct pt_regs); | ||
64 | |||
65 | #else /* CONFIG_X86_32 */ | ||
66 | |||
67 | /* X86_64 only */ | ||
68 | /* kernel/process_64.c */ | ||
69 | asmlinkage long sys_fork(struct pt_regs *); | ||
70 | asmlinkage long sys_clone(unsigned long, unsigned long, | ||
71 | void __user *, void __user *, | ||
72 | struct pt_regs *); | ||
73 | asmlinkage long sys_vfork(struct pt_regs *); | ||
74 | asmlinkage long sys_execve(char __user *, char __user * __user *, | ||
75 | char __user * __user *, | ||
76 | struct pt_regs *); | ||
77 | |||
78 | /* kernel/ioport.c */ | ||
79 | asmlinkage long sys_iopl(unsigned int, struct pt_regs *); | ||
80 | |||
81 | /* kernel/signal_64.c */ | ||
82 | asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
83 | struct pt_regs *); | ||
84 | asmlinkage long sys_rt_sigreturn(struct pt_regs *); | ||
85 | |||
86 | /* kernel/sys_x86_64.c */ | ||
87 | asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, | ||
88 | unsigned long, unsigned long, unsigned long); | ||
89 | struct new_utsname; | ||
90 | asmlinkage long sys_uname(struct new_utsname __user *); | ||
91 | |||
92 | #endif /* CONFIG_X86_32 */ | ||
93 | #endif /* _ASM_X86_SYSCALLS_H */ | ||
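Collecting the prototypes in one header lets the translation unit that defines each syscall include it, so the compiler cross-checks declaration against definition. A minimal sketch for sys_ioperm, whose prototype appears above; the real implementation lives in arch/x86/kernel/ioport.c and is elided here, this body is only a placeholder:

	#include <linux/linkage.h>
	#include <asm/syscalls.h>	/* pulls in the asmlinkage prototype above */

	asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
	{
		/* ... validate the range and update the I/O permission bitmap ... */
		return 0;		/* placeholder only */
	}

If the definition drifted from the prototype in <asm/syscalls.h> (say, a changed argument type), the compiler would now report the conflict instead of it surfacing as a silent ABI mismatch.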
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 983ce37c491f..34505dd7b24d 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_SYSTEM_H_ | 1 | #ifndef ASM_X86__SYSTEM_H |
2 | #define _ASM_X86_SYSTEM_H_ | 2 | #define ASM_X86__SYSTEM_H |
3 | 3 | ||
4 | #include <asm/asm.h> | 4 | #include <asm/asm.h> |
5 | #include <asm/segment.h> | 5 | #include <asm/segment.h> |
@@ -419,4 +419,4 @@ static inline void rdtsc_barrier(void) | |||
419 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | 419 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); |
420 | } | 420 | } |
421 | 421 | ||
422 | #endif | 422 | #endif /* ASM_X86__SYSTEM_H */ |
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h index 97fa251ccb2b..5aedb8bffc5a 100644 --- a/include/asm-x86/system_64.h +++ b/include/asm-x86/system_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_SYSTEM_H | 1 | #ifndef ASM_X86__SYSTEM_64_H |
2 | #define __ASM_SYSTEM_H | 2 | #define ASM_X86__SYSTEM_64_H |
3 | 3 | ||
4 | #include <asm/segment.h> | 4 | #include <asm/segment.h> |
5 | #include <asm/cmpxchg.h> | 5 | #include <asm/cmpxchg.h> |
@@ -19,4 +19,4 @@ static inline void write_cr8(unsigned long val) | |||
19 | 19 | ||
20 | #include <linux/irqflags.h> | 20 | #include <linux/irqflags.h> |
21 | 21 | ||
22 | #endif | 22 | #endif /* ASM_X86__SYSTEM_64_H */ |
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h index b1a4ea00df78..e7932d7fbbab 100644 --- a/include/asm-x86/tce.h +++ b/include/asm-x86/tce.h | |||
@@ -21,8 +21,8 @@ | |||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifndef _ASM_X86_64_TCE_H | 24 | #ifndef ASM_X86__TCE_H |
25 | #define _ASM_X86_64_TCE_H | 25 | #define ASM_X86__TCE_H |
26 | 26 | ||
27 | extern unsigned int specified_table_size; | 27 | extern unsigned int specified_table_size; |
28 | struct iommu_table; | 28 | struct iommu_table; |
@@ -45,4 +45,4 @@ extern void * __init alloc_tce_table(void); | |||
45 | extern void __init free_tce_table(void *tbl); | 45 | extern void __init free_tce_table(void *tbl); |
46 | extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar); | 46 | extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar); |
47 | 47 | ||
48 | #endif /* _ASM_X86_64_TCE_H */ | 48 | #endif /* ASM_X86__TCE_H */ |
diff --git a/include/asm-x86/termbits.h b/include/asm-x86/termbits.h index af1b70ea440f..3d00dc5e0c71 100644 --- a/include/asm-x86/termbits.h +++ b/include/asm-x86/termbits.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_TERMBITS_H | 1 | #ifndef ASM_X86__TERMBITS_H |
2 | #define _ASM_X86_TERMBITS_H | 2 | #define ASM_X86__TERMBITS_H |
3 | 3 | ||
4 | #include <linux/posix_types.h> | 4 | #include <linux/posix_types.h> |
5 | 5 | ||
@@ -195,4 +195,4 @@ struct ktermios { | |||
195 | #define TCSADRAIN 1 | 195 | #define TCSADRAIN 1 |
196 | #define TCSAFLUSH 2 | 196 | #define TCSAFLUSH 2 |
197 | 197 | ||
198 | #endif /* _ASM_X86_TERMBITS_H */ | 198 | #endif /* ASM_X86__TERMBITS_H */ |
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h index f72956331c49..e235db248071 100644 --- a/include/asm-x86/termios.h +++ b/include/asm-x86/termios.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_TERMIOS_H | 1 | #ifndef ASM_X86__TERMIOS_H |
2 | #define _ASM_X86_TERMIOS_H | 2 | #define ASM_X86__TERMIOS_H |
3 | 3 | ||
4 | #include <asm/termbits.h> | 4 | #include <asm/termbits.h> |
5 | #include <asm/ioctls.h> | 5 | #include <asm/ioctls.h> |
@@ -110,4 +110,4 @@ static inline int kernel_termios_to_user_termios_1(struct termios __user *u, | |||
110 | 110 | ||
111 | #endif /* __KERNEL__ */ | 111 | #endif /* __KERNEL__ */ |
112 | 112 | ||
113 | #endif /* _ASM_X86_TERMIOS_H */ | 113 | #endif /* ASM_X86__TERMIOS_H */ |
diff --git a/include/asm-x86/therm_throt.h b/include/asm-x86/therm_throt.h index 399bf6026b16..1c7f57b6b66e 100644 --- a/include/asm-x86/therm_throt.h +++ b/include/asm-x86/therm_throt.h | |||
@@ -1,9 +1,9 @@ | |||
1 | #ifndef __ASM_I386_THERM_THROT_H__ | 1 | #ifndef ASM_X86__THERM_THROT_H |
2 | #define __ASM_I386_THERM_THROT_H__ 1 | 2 | #define ASM_X86__THERM_THROT_H |
3 | 3 | ||
4 | #include <asm/atomic.h> | 4 | #include <asm/atomic.h> |
5 | 5 | ||
6 | extern atomic_t therm_throt_en; | 6 | extern atomic_t therm_throt_en; |
7 | int therm_throt_process(int curr); | 7 | int therm_throt_process(int curr); |
8 | 8 | ||
9 | #endif /* __ASM_I386_THERM_THROT_H__ */ | 9 | #endif /* ASM_X86__THERM_THROT_H */ |
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index da0a675adf94..30586f2ee558 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h | |||
@@ -4,8 +4,8 @@ | |||
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | 4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #ifndef _ASM_X86_THREAD_INFO_H | 7 | #ifndef ASM_X86__THREAD_INFO_H |
8 | #define _ASM_X86_THREAD_INFO_H | 8 | #define ASM_X86__THREAD_INFO_H |
9 | 9 | ||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
@@ -239,6 +239,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
239 | #define TS_POLLING 0x0004 /* true if in idle loop | 239 | #define TS_POLLING 0x0004 /* true if in idle loop |
240 | and not sleeping */ | 240 | and not sleeping */ |
241 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | 241 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ |
242 | #define TS_XSAVE 0x0010 /* Use xsave/xrstor */ | ||
242 | 243 | ||
243 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 244 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
244 | 245 | ||
@@ -258,4 +259,4 @@ extern void free_thread_info(struct thread_info *ti); | |||
258 | extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); | 259 | extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); |
259 | #define arch_task_cache_init arch_task_cache_init | 260 | #define arch_task_cache_init arch_task_cache_init |
260 | #endif | 261 | #endif |
261 | #endif /* _ASM_X86_THREAD_INFO_H */ | 262 | #endif /* ASM_X86__THREAD_INFO_H */ |
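TS_XSAVE is a per-thread status bit rather than a CPU feature test, so FPU save/restore paths can branch on it cheaply at run time. A minimal sketch of how such a flag might be consulted; save_fpu_state(), xsave() and fxsave() are stand-in names for this illustration, not the kernel's actual helpers:

	/* Illustrative only; the real logic lives in the i387/xsave support code. */
	static inline void save_fpu_state(struct task_struct *tsk)
	{
		if (task_thread_info(tsk)->status & TS_XSAVE)
			xsave(tsk);	/* kernel chose XSAVE/XRSTOR at boot */
		else
			fxsave(tsk);	/* legacy FXSAVE path */
	}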
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h index a17fa473e91d..3e724eef7ac4 100644 --- a/include/asm-x86/time.h +++ b/include/asm-x86/time.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASMX86_TIME_H | 1 | #ifndef ASM_X86__TIME_H |
2 | #define _ASMX86_TIME_H | 2 | #define ASM_X86__TIME_H |
3 | 3 | ||
4 | extern void hpet_time_init(void); | 4 | extern void hpet_time_init(void); |
5 | 5 | ||
@@ -46,6 +46,8 @@ static inline int native_set_wallclock(unsigned long nowtime) | |||
46 | 46 | ||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | extern void time_init(void); | ||
50 | |||
49 | #ifdef CONFIG_PARAVIRT | 51 | #ifdef CONFIG_PARAVIRT |
50 | #include <asm/paravirt.h> | 52 | #include <asm/paravirt.h> |
51 | #else /* !CONFIG_PARAVIRT */ | 53 | #else /* !CONFIG_PARAVIRT */ |
@@ -58,4 +60,4 @@ static inline int native_set_wallclock(unsigned long nowtime) | |||
58 | 60 | ||
59 | extern unsigned long __init calibrate_cpu(void); | 61 | extern unsigned long __init calibrate_cpu(void); |
60 | 62 | ||
61 | #endif | 63 | #endif /* ASM_X86__TIME_H */ |
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h index fb2a4ddddf3d..d0babce4b47a 100644 --- a/include/asm-x86/timer.h +++ b/include/asm-x86/timer.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASMi386_TIMER_H | 1 | #ifndef ASM_X86__TIMER_H |
2 | #define _ASMi386_TIMER_H | 2 | #define ASM_X86__TIMER_H |
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <linux/percpu.h> | 5 | #include <linux/percpu.h> |
@@ -9,9 +9,12 @@ | |||
9 | unsigned long long native_sched_clock(void); | 9 | unsigned long long native_sched_clock(void); |
10 | unsigned long native_calibrate_tsc(void); | 10 | unsigned long native_calibrate_tsc(void); |
11 | 11 | ||
12 | #ifdef CONFIG_X86_32 | ||
12 | extern int timer_ack; | 13 | extern int timer_ack; |
13 | extern int no_timer_check; | ||
14 | extern int recalibrate_cpu_khz(void); | 14 | extern int recalibrate_cpu_khz(void); |
15 | #endif /* CONFIG_X86_32 */ | ||
16 | |||
17 | extern int no_timer_check; | ||
15 | 18 | ||
16 | #ifndef CONFIG_PARAVIRT | 19 | #ifndef CONFIG_PARAVIRT |
17 | #define calibrate_tsc() native_calibrate_tsc() | 20 | #define calibrate_tsc() native_calibrate_tsc() |
@@ -60,4 +63,4 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) | |||
60 | return ns; | 63 | return ns; |
61 | } | 64 | } |
62 | 65 | ||
63 | #endif | 66 | #endif /* ASM_X86__TIMER_H */ |
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h index 43e5a78500c5..d1ce2416a5da 100644 --- a/include/asm-x86/timex.h +++ b/include/asm-x86/timex.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* x86 architecture timex specifications */ | 1 | /* x86 architecture timex specifications */ |
2 | #ifndef _ASM_X86_TIMEX_H | 2 | #ifndef ASM_X86__TIMEX_H |
3 | #define _ASM_X86_TIMEX_H | 3 | #define ASM_X86__TIMEX_H |
4 | 4 | ||
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/tsc.h> | 6 | #include <asm/tsc.h> |
@@ -16,4 +16,4 @@ | |||
16 | 16 | ||
17 | #define ARCH_HAS_READ_CURRENT_TIMER | 17 | #define ARCH_HAS_READ_CURRENT_TIMER |
18 | 18 | ||
19 | #endif | 19 | #endif /* ASM_X86__TIMEX_H */ |
diff --git a/include/asm-x86/tlb.h b/include/asm-x86/tlb.h index e4e9e2d07a93..db36e9e89e87 100644 --- a/include/asm-x86/tlb.h +++ b/include/asm-x86/tlb.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_TLB_H | 1 | #ifndef ASM_X86__TLB_H |
2 | #define _ASM_X86_TLB_H | 2 | #define ASM_X86__TLB_H |
3 | 3 | ||
4 | #define tlb_start_vma(tlb, vma) do { } while (0) | 4 | #define tlb_start_vma(tlb, vma) do { } while (0) |
5 | #define tlb_end_vma(tlb, vma) do { } while (0) | 5 | #define tlb_end_vma(tlb, vma) do { } while (0) |
@@ -8,4 +8,4 @@ | |||
8 | 8 | ||
9 | #include <asm-generic/tlb.h> | 9 | #include <asm-generic/tlb.h> |
10 | 10 | ||
11 | #endif | 11 | #endif /* ASM_X86__TLB_H */ |
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 35c76ceb9f40..ef68b76dc3c5 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_TLBFLUSH_H | 1 | #ifndef ASM_X86__TLBFLUSH_H |
2 | #define _ASM_X86_TLBFLUSH_H | 2 | #define ASM_X86__TLBFLUSH_H |
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
@@ -165,4 +165,4 @@ static inline void flush_tlb_kernel_range(unsigned long start, | |||
165 | flush_tlb_all(); | 165 | flush_tlb_all(); |
166 | } | 166 | } |
167 | 167 | ||
168 | #endif /* _ASM_X86_TLBFLUSH_H */ | 168 | #endif /* ASM_X86__TLBFLUSH_H */ |
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 90ac7718469a..7eca9bc022b2 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h | |||
@@ -22,8 +22,8 @@ | |||
22 | * | 22 | * |
23 | * Send feedback to <colpatch@us.ibm.com> | 23 | * Send feedback to <colpatch@us.ibm.com> |
24 | */ | 24 | */ |
25 | #ifndef _ASM_X86_TOPOLOGY_H | 25 | #ifndef ASM_X86__TOPOLOGY_H |
26 | #define _ASM_X86_TOPOLOGY_H | 26 | #define ASM_X86__TOPOLOGY_H |
27 | 27 | ||
28 | #ifdef CONFIG_X86_32 | 28 | #ifdef CONFIG_X86_32 |
29 | # ifdef CONFIG_X86_HT | 29 | # ifdef CONFIG_X86_HT |
@@ -255,4 +255,4 @@ static inline void set_mp_bus_to_node(int busnum, int node) | |||
255 | } | 255 | } |
256 | #endif | 256 | #endif |
257 | 257 | ||
258 | #endif /* _ASM_X86_TOPOLOGY_H */ | 258 | #endif /* ASM_X86__TOPOLOGY_H */ |
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h index b156b08d0131..0406bbd898a9 100644 --- a/include/asm-x86/trampoline.h +++ b/include/asm-x86/trampoline.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __TRAMPOLINE_HEADER | 1 | #ifndef ASM_X86__TRAMPOLINE_H |
2 | #define __TRAMPOLINE_HEADER | 2 | #define ASM_X86__TRAMPOLINE_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | 5 | ||
@@ -18,4 +18,4 @@ extern unsigned long setup_trampoline(void); | |||
18 | 18 | ||
19 | #endif /* __ASSEMBLY__ */ | 19 | #endif /* __ASSEMBLY__ */ |
20 | 20 | ||
21 | #endif /* __TRAMPOLINE_HEADER */ | 21 | #endif /* ASM_X86__TRAMPOLINE_H */ |
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h index a4b65a71bd66..2ccebc6fb0b0 100644 --- a/include/asm-x86/traps.h +++ b/include/asm-x86/traps.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_TRAPS_H | 1 | #ifndef ASM_X86__TRAPS_H |
2 | #define _ASM_X86_TRAPS_H | 2 | #define ASM_X86__TRAPS_H |
3 | 3 | ||
4 | /* Common in X86_32 and X86_64 */ | 4 | /* Common in X86_32 and X86_64 */ |
5 | asmlinkage void divide_error(void); | 5 | asmlinkage void divide_error(void); |
@@ -51,6 +51,8 @@ void do_spurious_interrupt_bug(struct pt_regs *, long); | |||
51 | unsigned long patch_espfix_desc(unsigned long, unsigned long); | 51 | unsigned long patch_espfix_desc(unsigned long, unsigned long); |
52 | asmlinkage void math_emulate(long); | 52 | asmlinkage void math_emulate(long); |
53 | 53 | ||
54 | void do_page_fault(struct pt_regs *regs, unsigned long error_code); | ||
55 | |||
54 | #else /* CONFIG_X86_32 */ | 56 | #else /* CONFIG_X86_32 */ |
55 | 57 | ||
56 | asmlinkage void double_fault(void); | 58 | asmlinkage void double_fault(void); |
@@ -62,5 +64,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *); | |||
62 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *); | 64 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *); |
63 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs *); | 65 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs *); |
64 | 66 | ||
67 | asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code); | ||
68 | |||
65 | #endif /* CONFIG_X86_32 */ | 69 | #endif /* CONFIG_X86_32 */ |
66 | #endif /* _ASM_X86_TRAPS_H */ | 70 | #endif /* ASM_X86__TRAPS_H */ |
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index cb6f6ee45b8f..ad0f5c41e78c 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * x86 TSC related functions | 2 | * x86 TSC related functions |
3 | */ | 3 | */ |
4 | #ifndef _ASM_X86_TSC_H | 4 | #ifndef ASM_X86__TSC_H |
5 | #define _ASM_X86_TSC_H | 5 | #define ASM_X86__TSC_H |
6 | 6 | ||
7 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
8 | 8 | ||
@@ -59,4 +59,4 @@ extern void check_tsc_sync_target(void); | |||
59 | 59 | ||
60 | extern int notsc_setup(char *); | 60 | extern int notsc_setup(char *); |
61 | 61 | ||
62 | #endif | 62 | #endif /* ASM_X86__TSC_H */ |
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h index 1ac80cd9acf8..e78b52e17444 100644 --- a/include/asm-x86/types.h +++ b/include/asm-x86/types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_TYPES_H | 1 | #ifndef ASM_X86__TYPES_H |
2 | #define _ASM_X86_TYPES_H | 2 | #define ASM_X86__TYPES_H |
3 | 3 | ||
4 | #include <asm-generic/int-ll64.h> | 4 | #include <asm-generic/int-ll64.h> |
5 | 5 | ||
@@ -33,4 +33,4 @@ typedef u32 dma_addr_t; | |||
33 | #endif /* __ASSEMBLY__ */ | 33 | #endif /* __ASSEMBLY__ */ |
34 | #endif /* __KERNEL__ */ | 34 | #endif /* __KERNEL__ */ |
35 | 35 | ||
36 | #endif | 36 | #endif /* ASM_X86__TYPES_H */ |
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h index 5f702d1d5218..48ebc0ad40ec 100644 --- a/include/asm-x86/uaccess.h +++ b/include/asm-x86/uaccess.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_UACCES_H_ | 1 | #ifndef ASM_X86__UACCESS_H |
2 | #define _ASM_UACCES_H_ | 2 | #define ASM_X86__UACCESS_H |
3 | /* | 3 | /* |
4 | * User space memory access functions | 4 | * User space memory access functions |
5 | */ | 5 | */ |
@@ -450,5 +450,5 @@ extern struct movsl_mask { | |||
450 | # include "uaccess_64.h" | 450 | # include "uaccess_64.h" |
451 | #endif | 451 | #endif |
452 | 452 | ||
453 | #endif | 453 | #endif /* ASM_X86__UACCESS_H */ |
454 | 454 | ||
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index 6fdef39a0bcb..6b5b57d9c6d1 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __i386_UACCESS_H | 1 | #ifndef ASM_X86__UACCESS_32_H |
2 | #define __i386_UACCESS_H | 2 | #define ASM_X86__UACCESS_32_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * User space memory access functions | 5 | * User space memory access functions |
@@ -215,4 +215,4 @@ long strnlen_user(const char __user *str, long n); | |||
215 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | 215 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); |
216 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | 216 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); |
217 | 217 | ||
218 | #endif /* __i386_UACCESS_H */ | 218 | #endif /* ASM_X86__UACCESS_32_H */ |
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index 515d4dce96b5..5cfd2951c9e7 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __X86_64_UACCESS_H | 1 | #ifndef ASM_X86__UACCESS_64_H |
2 | #define __X86_64_UACCESS_H | 2 | #define ASM_X86__UACCESS_64_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * User space memory access functions | 5 | * User space memory access functions |
@@ -198,4 +198,4 @@ static inline int __copy_from_user_inatomic_nocache(void *dst, | |||
198 | unsigned long | 198 | unsigned long |
199 | copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); | 199 | copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); |
200 | 200 | ||
201 | #endif /* __X86_64_UACCESS_H */ | 201 | #endif /* ASM_X86__UACCESS_64_H */ |
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h index 50a79f7fcde9..89eaa5456a7e 100644 --- a/include/asm-x86/ucontext.h +++ b/include/asm-x86/ucontext.h | |||
@@ -1,5 +1,11 @@ | |||
1 | #ifndef _ASM_X86_UCONTEXT_H | 1 | #ifndef ASM_X86__UCONTEXT_H |
2 | #define _ASM_X86_UCONTEXT_H | 2 | #define ASM_X86__UCONTEXT_H |
3 | |||
4 | #define UC_FP_XSTATE 0x1 /* indicates the presence of extended state | ||
5 | * information in the memory layout pointed to | ||
6 | * by the fpstate pointer in the ucontext's | ||
7 | * sigcontext struct (uc_mcontext). | ||
8 | */ | ||
3 | 9 | ||
4 | struct ucontext { | 10 | struct ucontext { |
5 | unsigned long uc_flags; | 11 | unsigned long uc_flags; |
@@ -9,4 +15,4 @@ struct ucontext { | |||
9 | sigset_t uc_sigmask; /* mask last for extensibility */ | 15 | sigset_t uc_sigmask; /* mask last for extensibility */ |
10 | }; | 16 | }; |
11 | 17 | ||
12 | #endif /* _ASM_X86_UCONTEXT_H */ | 18 | #endif /* ASM_X86__UCONTEXT_H */ |
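UC_FP_XSTATE is the user-visible counterpart of the magic-number scheme in sigcontext.h: a handler installed with SA_SIGINFO can test uc_flags before treating the frame's fpstate area as a struct _xstate. A hedged user-space sketch; the handler name is hypothetical and the fallback #define exists only to keep the snippet self-contained:

	#include <signal.h>
	#include <ucontext.h>

	#ifndef UC_FP_XSTATE
	#define UC_FP_XSTATE 0x1	/* value from asm/ucontext.h above */
	#endif

	/* Hypothetical SA_SIGINFO handler. */
	static void fpe_handler(int sig, siginfo_t *info, void *ctx)
	{
		ucontext_t *uc = ctx;

		if (uc->uc_flags & UC_FP_XSTATE) {
			/* The fpstate area in uc_mcontext carries the extended
			 * (xsave) layout, i.e. a struct _xstate rather than only
			 * the legacy struct _fpstate. */
		}
	}

Registering the handler with sigaction() and SA_SIGINFO is what makes the third argument a ucontext pointer in the first place.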
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index a7bd416b4763..59dcdec37160 100644 --- a/include/asm-x86/unaligned.h +++ b/include/asm-x86/unaligned.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_UNALIGNED_H | 1 | #ifndef ASM_X86__UNALIGNED_H |
2 | #define _ASM_X86_UNALIGNED_H | 2 | #define ASM_X86__UNALIGNED_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The x86 can do unaligned accesses itself. | 5 | * The x86 can do unaligned accesses itself. |
@@ -11,4 +11,4 @@ | |||
11 | #define get_unaligned __get_unaligned_le | 11 | #define get_unaligned __get_unaligned_le |
12 | #define put_unaligned __put_unaligned_le | 12 | #define put_unaligned __put_unaligned_le |
13 | 13 | ||
14 | #endif /* _ASM_X86_UNALIGNED_H */ | 14 | #endif /* ASM_X86__UNALIGNED_H */ |
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index d7394673b772..017f4a87c913 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_I386_UNISTD_H_ | 1 | #ifndef ASM_X86__UNISTD_32_H |
2 | #define _ASM_I386_UNISTD_H_ | 2 | #define ASM_X86__UNISTD_32_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file contains the system call numbers. | 5 | * This file contains the system call numbers. |
@@ -376,4 +376,4 @@ | |||
376 | #endif | 376 | #endif |
377 | 377 | ||
378 | #endif /* __KERNEL__ */ | 378 | #endif /* __KERNEL__ */ |
379 | #endif /* _ASM_I386_UNISTD_H_ */ | 379 | #endif /* ASM_X86__UNISTD_32_H */ |
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index 3a341d791792..ace83f1f6787 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_UNISTD_H_ | 1 | #ifndef ASM_X86__UNISTD_64_H |
2 | #define _ASM_X86_64_UNISTD_H_ | 2 | #define ASM_X86__UNISTD_64_H |
3 | 3 | ||
4 | #ifndef __SYSCALL | 4 | #ifndef __SYSCALL |
5 | #define __SYSCALL(a, b) | 5 | #define __SYSCALL(a, b) |
@@ -690,4 +690,4 @@ __SYSCALL(__NR_inotify_init1, sys_inotify_init1) | |||
690 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | 690 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") |
691 | #endif /* __KERNEL__ */ | 691 | #endif /* __KERNEL__ */ |
692 | 692 | ||
693 | #endif /* _ASM_X86_64_UNISTD_H_ */ | 693 | #endif /* ASM_X86__UNISTD_64_H */ |
diff --git a/include/asm-x86/unwind.h b/include/asm-x86/unwind.h index 8b064bd9c553..a2151567db44 100644 --- a/include/asm-x86/unwind.h +++ b/include/asm-x86/unwind.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_UNWIND_H | 1 | #ifndef ASM_X86__UNWIND_H |
2 | #define _ASM_X86_UNWIND_H | 2 | #define ASM_X86__UNWIND_H |
3 | 3 | ||
4 | #define UNW_PC(frame) ((void)(frame), 0UL) | 4 | #define UNW_PC(frame) ((void)(frame), 0UL) |
5 | #define UNW_SP(frame) ((void)(frame), 0UL) | 5 | #define UNW_SP(frame) ((void)(frame), 0UL) |
@@ -10,4 +10,4 @@ static inline int arch_unw_user_mode(const void *info) | |||
10 | return 0; | 10 | return 0; |
11 | } | 11 | } |
12 | 12 | ||
13 | #endif /* _ASM_X86_UNWIND_H */ | 13 | #endif /* ASM_X86__UNWIND_H */ |
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h index a3d910047879..aa66c1857f06 100644 --- a/include/asm-x86/user32.h +++ b/include/asm-x86/user32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef USER32_H | 1 | #ifndef ASM_X86__USER32_H |
2 | #define USER32_H 1 | 2 | #define ASM_X86__USER32_H |
3 | 3 | ||
4 | /* IA32 compatible user structures for ptrace. | 4 | /* IA32 compatible user structures for ptrace. |
5 | * These should be used for 32bit coredumps too. */ | 5 | * These should be used for 32bit coredumps too. */ |
@@ -67,4 +67,4 @@ struct user32 { | |||
67 | }; | 67 | }; |
68 | 68 | ||
69 | 69 | ||
70 | #endif | 70 | #endif /* ASM_X86__USER32_H */ |
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index d6e51edc259d..e0fe2f55f1a6 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _I386_USER_H | 1 | #ifndef ASM_X86__USER_32_H |
2 | #define _I386_USER_H | 2 | #define ASM_X86__USER_32_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | /* Core file format: The core file is written in such a way that gdb | 5 | /* Core file format: The core file is written in such a way that gdb |
@@ -128,4 +128,4 @@ struct user{ | |||
128 | #define HOST_TEXT_START_ADDR (u.start_code) | 128 | #define HOST_TEXT_START_ADDR (u.start_code) |
129 | #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) | 129 | #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) |
130 | 130 | ||
131 | #endif /* _I386_USER_H */ | 131 | #endif /* ASM_X86__USER_32_H */ |
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 6037b634c77f..38b5799863b4 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _X86_64_USER_H | 1 | #ifndef ASM_X86__USER_64_H |
2 | #define _X86_64_USER_H | 2 | #define ASM_X86__USER_64_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/page.h> | 5 | #include <asm/page.h> |
@@ -134,4 +134,4 @@ struct user { | |||
134 | #define HOST_TEXT_START_ADDR (u.start_code) | 134 | #define HOST_TEXT_START_ADDR (u.start_code) |
135 | #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) | 135 | #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) |
136 | 136 | ||
137 | #endif /* _X86_64_USER_H */ | 137 | #endif /* ASM_X86__USER_64_H */ |
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h index aa73362ff5df..7cd6d7ec1308 100644 --- a/include/asm-x86/uv/bios.h +++ b/include/asm-x86/uv/bios.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_BIOS_H | 1 | #ifndef ASM_X86__UV__BIOS_H |
2 | #define _ASM_X86_BIOS_H | 2 | #define ASM_X86__UV__BIOS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * BIOS layer definitions. | 5 | * BIOS layer definitions. |
@@ -65,4 +65,4 @@ x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second, | |||
65 | unsigned long *drift_info); | 65 | unsigned long *drift_info); |
66 | extern const char *x86_bios_strerror(long status); | 66 | extern const char *x86_bios_strerror(long status); |
67 | 67 | ||
68 | #endif /* _ASM_X86_BIOS_H */ | 68 | #endif /* ASM_X86__UV__BIOS_H */ |
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h index 610b6b308e93..77153fb18f5e 100644 --- a/include/asm-x86/uv/uv_bau.h +++ b/include/asm-x86/uv/uv_bau.h | |||
@@ -8,8 +8,8 @@ | |||
8 | * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __ASM_X86_UV_BAU__ | 11 | #ifndef ASM_X86__UV__UV_BAU_H |
12 | #define __ASM_X86_UV_BAU__ | 12 | #define ASM_X86__UV__UV_BAU_H |
13 | 13 | ||
14 | #include <linux/bitmap.h> | 14 | #include <linux/bitmap.h> |
15 | #define BITSPERBYTE 8 | 15 | #define BITSPERBYTE 8 |
@@ -329,4 +329,4 @@ extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long); | |||
329 | extern void uv_bau_message_intr1(void); | 329 | extern void uv_bau_message_intr1(void); |
330 | extern void uv_bau_timeout_intr1(void); | 330 | extern void uv_bau_timeout_intr1(void); |
331 | 331 | ||
332 | #endif /* __ASM_X86_UV_BAU__ */ | 332 | #endif /* ASM_X86__UV__UV_BAU_H */ |
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h index a4ef26e5850b..bdb5b01afbf5 100644 --- a/include/asm-x86/uv/uv_hub.h +++ b/include/asm-x86/uv/uv_hub.h | |||
@@ -8,8 +8,8 @@ | |||
8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __ASM_X86_UV_HUB_H__ | 11 | #ifndef ASM_X86__UV__UV_HUB_H |
12 | #define __ASM_X86_UV_HUB_H__ | 12 | #define ASM_X86__UV__UV_HUB_H |
13 | 13 | ||
14 | #include <linux/numa.h> | 14 | #include <linux/numa.h> |
15 | #include <linux/percpu.h> | 15 | #include <linux/percpu.h> |
@@ -350,5 +350,5 @@ static inline int uv_num_possible_blades(void) | |||
350 | return uv_possible_blades; | 350 | return uv_possible_blades; |
351 | } | 351 | } |
352 | 352 | ||
353 | #endif /* __ASM_X86_UV_HUB__ */ | 353 | #endif /* ASM_X86__UV__UV_HUB_H */ |
354 | 354 | ||
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h index 151fd7fcb809..8b03d89d2459 100644 --- a/include/asm-x86/uv/uv_mmrs.h +++ b/include/asm-x86/uv/uv_mmrs.h | |||
@@ -8,8 +8,8 @@ | |||
8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __ASM_X86_UV_MMRS__ | 11 | #ifndef ASM_X86__UV__UV_MMRS_H |
12 | #define __ASM_X86_UV_MMRS__ | 12 | #define ASM_X86__UV__UV_MMRS_H |
13 | 13 | ||
14 | #define UV_MMR_ENABLE (1UL << 63) | 14 | #define UV_MMR_ENABLE (1UL << 63) |
15 | 15 | ||
@@ -1292,4 +1292,4 @@ union uvh_si_alias2_overlay_config_u { | |||
1292 | }; | 1292 | }; |
1293 | 1293 | ||
1294 | 1294 | ||
1295 | #endif /* __ASM_X86_UV_MMRS__ */ | 1295 | #endif /* ASM_X86__UV__UV_MMRS_H */ |
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 8e18fb80f5e6..4ab320913ea3 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_VDSO_H | 1 | #ifndef ASM_X86__VDSO_H |
2 | #define _ASM_X86_VDSO_H 1 | 2 | #define ASM_X86__VDSO_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_64 | 4 | #ifdef CONFIG_X86_64 |
5 | extern const char VDSO64_PRELINK[]; | 5 | extern const char VDSO64_PRELINK[]; |
@@ -44,4 +44,4 @@ extern const char vdso32_int80_start, vdso32_int80_end; | |||
44 | extern const char vdso32_syscall_start, vdso32_syscall_end; | 44 | extern const char vdso32_syscall_start, vdso32_syscall_end; |
45 | extern const char vdso32_sysenter_start, vdso32_sysenter_end; | 45 | extern const char vdso32_sysenter_start, vdso32_sysenter_end; |
46 | 46 | ||
47 | #endif /* asm-x86/vdso.h */ | 47 | #endif /* ASM_X86__VDSO_H */ |
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h index 0ccf804377e6..b9e493d07d07 100644 --- a/include/asm-x86/vga.h +++ b/include/asm-x86/vga.h | |||
@@ -4,8 +4,8 @@ | |||
4 | * (c) 1998 Martin Mares <mj@ucw.cz> | 4 | * (c) 1998 Martin Mares <mj@ucw.cz> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #ifndef _LINUX_ASM_VGA_H_ | 7 | #ifndef ASM_X86__VGA_H |
8 | #define _LINUX_ASM_VGA_H_ | 8 | #define ASM_X86__VGA_H |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * On the PC, we can just recalculate addresses and then | 11 | * On the PC, we can just recalculate addresses and then |
@@ -17,4 +17,4 @@ | |||
17 | #define vga_readb(x) (*(x)) | 17 | #define vga_readb(x) (*(x)) |
18 | #define vga_writeb(x, y) (*(y) = (x)) | 18 | #define vga_writeb(x, y) (*(y) = (x)) |
19 | 19 | ||
20 | #endif | 20 | #endif /* ASM_X86__VGA_H */ |
diff --git a/include/asm-x86/vgtod.h b/include/asm-x86/vgtod.h index 3301f0929342..38fd13364021 100644 --- a/include/asm-x86/vgtod.h +++ b/include/asm-x86/vgtod.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_VGTOD_H | 1 | #ifndef ASM_X86__VGTOD_H |
2 | #define _ASM_VGTOD_H 1 | 2 | #define ASM_X86__VGTOD_H |
3 | 3 | ||
4 | #include <asm/vsyscall.h> | 4 | #include <asm/vsyscall.h> |
5 | #include <linux/clocksource.h> | 5 | #include <linux/clocksource.h> |
@@ -26,4 +26,4 @@ extern struct vsyscall_gtod_data __vsyscall_gtod_data | |||
26 | __section_vsyscall_gtod_data; | 26 | __section_vsyscall_gtod_data; |
27 | extern struct vsyscall_gtod_data vsyscall_gtod_data; | 27 | extern struct vsyscall_gtod_data vsyscall_gtod_data; |
28 | 28 | ||
29 | #endif | 29 | #endif /* ASM_X86__VGTOD_H */ |
diff --git a/include/asm-x86/visws/cobalt.h b/include/asm-x86/visws/cobalt.h index 995258831b7f..9627a8fe84e9 100644 --- a/include/asm-x86/visws/cobalt.h +++ b/include/asm-x86/visws/cobalt.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __I386_SGI_COBALT_H | 1 | #ifndef ASM_X86__VISWS__COBALT_H |
2 | #define __I386_SGI_COBALT_H | 2 | #define ASM_X86__VISWS__COBALT_H |
3 | 3 | ||
4 | #include <asm/fixmap.h> | 4 | #include <asm/fixmap.h> |
5 | 5 | ||
@@ -122,4 +122,4 @@ extern char visws_board_type; | |||
122 | 122 | ||
123 | extern char visws_board_rev; | 123 | extern char visws_board_rev; |
124 | 124 | ||
125 | #endif /* __I386_SGI_COBALT_H */ | 125 | #endif /* ASM_X86__VISWS__COBALT_H */ |
diff --git a/include/asm-x86/visws/lithium.h b/include/asm-x86/visws/lithium.h index dfcd4f07ab85..b36d3b378c63 100644 --- a/include/asm-x86/visws/lithium.h +++ b/include/asm-x86/visws/lithium.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __I386_SGI_LITHIUM_H | 1 | #ifndef ASM_X86__VISWS__LITHIUM_H |
2 | #define __I386_SGI_LITHIUM_H | 2 | #define ASM_X86__VISWS__LITHIUM_H |
3 | 3 | ||
4 | #include <asm/fixmap.h> | 4 | #include <asm/fixmap.h> |
5 | 5 | ||
@@ -49,5 +49,5 @@ static inline unsigned short li_pcib_read16(unsigned long reg) | |||
49 | return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); | 49 | return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); |
50 | } | 50 | } |
51 | 51 | ||
52 | #endif | 52 | #endif /* ASM_X86__VISWS__LITHIUM_H */ |
53 | 53 | ||
diff --git a/include/asm-x86/visws/piix4.h b/include/asm-x86/visws/piix4.h index 83ea4f46e419..61c938045ec9 100644 --- a/include/asm-x86/visws/piix4.h +++ b/include/asm-x86/visws/piix4.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __I386_SGI_PIIX_H | 1 | #ifndef ASM_X86__VISWS__PIIX4_H |
2 | #define __I386_SGI_PIIX_H | 2 | #define ASM_X86__VISWS__PIIX4_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * PIIX4 as used on SGI Visual Workstations | 5 | * PIIX4 as used on SGI Visual Workstations |
@@ -104,4 +104,4 @@ | |||
104 | */ | 104 | */ |
105 | #define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in | 105 | #define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in |
106 | 106 | ||
107 | #endif | 107 | #endif /* ASM_X86__VISWS__PIIX4_H */ |
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index 5ce351325e01..998bd18eb737 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _LINUX_VM86_H | 1 | #ifndef ASM_X86__VM86_H |
2 | #define _LINUX_VM86_H | 2 | #define ASM_X86__VM86_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * I'm guessing at the VIF/VIP flag usage, but hope that this is how | 5 | * I'm guessing at the VIF/VIP flag usage, but hope that this is how |
@@ -205,4 +205,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) | |||
205 | 205 | ||
206 | #endif /* __KERNEL__ */ | 206 | #endif /* __KERNEL__ */ |
207 | 207 | ||
208 | #endif | 208 | #endif /* ASM_X86__VM86_H */ |
diff --git a/include/asm-x86/vmi_time.h b/include/asm-x86/vmi_time.h index c3118c385156..b2d39e6a08b7 100644 --- a/include/asm-x86/vmi_time.h +++ b/include/asm-x86/vmi_time.h | |||
@@ -22,8 +22,8 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #ifndef __VMI_TIME_H | 25 | #ifndef ASM_X86__VMI_TIME_H |
26 | #define __VMI_TIME_H | 26 | #define ASM_X86__VMI_TIME_H |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Raw VMI call indices for timer functions | 29 | * Raw VMI call indices for timer functions |
@@ -95,4 +95,4 @@ extern void __devinit vmi_time_ap_init(void); | |||
95 | 95 | ||
96 | #define CONFIG_VMI_ALARM_HZ 100 | 96 | #define CONFIG_VMI_ALARM_HZ 100 |
97 | 97 | ||
98 | #endif | 98 | #endif /* ASM_X86__VMI_TIME_H */ |
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h index 6b66ff905af0..dcd4682413de 100644 --- a/include/asm-x86/vsyscall.h +++ b/include/asm-x86/vsyscall.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_X86_64_VSYSCALL_H_ | 1 | #ifndef ASM_X86__VSYSCALL_H |
2 | #define _ASM_X86_64_VSYSCALL_H_ | 2 | #define ASM_X86__VSYSCALL_H |
3 | 3 | ||
4 | enum vsyscall_num { | 4 | enum vsyscall_num { |
5 | __NR_vgettimeofday, | 5 | __NR_vgettimeofday, |
@@ -41,4 +41,4 @@ extern void map_vsyscall(void); | |||
41 | 41 | ||
42 | #endif /* __KERNEL__ */ | 42 | #endif /* __KERNEL__ */ |
43 | 43 | ||
44 | #endif /* _ASM_X86_64_VSYSCALL_H_ */ | 44 | #endif /* ASM_X86__VSYSCALL_H */ |
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h new file mode 100644 index 000000000000..f2cba4e79a23 --- /dev/null +++ b/include/asm-x86/xcr.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2008 rPath, Inc. - All Rights Reserved | ||
4 | * | ||
5 | * This file is part of the Linux kernel, and is made available under | ||
6 | * the terms of the GNU General Public License version 2 or (at your | ||
7 | * option) any later version; incorporated herein by reference. | ||
8 | * | ||
9 | * ----------------------------------------------------------------------- */ | ||
10 | |||
11 | /* | ||
12 | * asm-x86/xcr.h | ||
13 | * | ||
14 | * Definitions for the eXtended Control Register instructions | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_X86_XCR_H | ||
18 | #define _ASM_X86_XCR_H | ||
19 | |||
20 | #define XCR_XFEATURE_ENABLED_MASK 0x00000000 | ||
21 | |||
22 | #ifdef __KERNEL__ | ||
23 | # ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/types.h> | ||
26 | |||
27 | static inline u64 xgetbv(u32 index) | ||
28 | { | ||
29 | u32 eax, edx; | ||
30 | |||
31 | asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ | ||
32 | : "=a" (eax), "=d" (edx) | ||
33 | : "c" (index)); | ||
34 | return eax + ((u64)edx << 32); | ||
35 | } | ||
36 | |||
37 | static inline void xsetbv(u32 index, u64 value) | ||
38 | { | ||
39 | u32 eax = value; | ||
40 | u32 edx = value >> 32; | ||
41 | |||
42 | asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */ | ||
43 | : : "a" (eax), "d" (edx), "c" (index)); | ||
44 | } | ||
45 | |||
46 | # endif /* __ASSEMBLY__ */ | ||
47 | #endif /* __KERNEL__ */ | ||
48 | |||
49 | #endif /* _ASM_X86_XCR_H */ | ||
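The new xgetbv()/xsetbv() helpers above emit the instructions as raw opcode bytes (0f 01 d0 / 0f 01 d1), presumably so the file still assembles with toolchains that do not know the mnemonics; the 64-bit XCR value travels in the edx:eax register pair with the XCR number in ecx. The standalone sketch below only demonstrates that split/recombine convention and is illustrative, not part of the patch.

    /* Illustrative only: the edx:eax split used by xsetbv() and the
     * recombination done by xgetbv() in the hunk above. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t value = 0x3;                        /* x87 + SSE state enabled */
            uint32_t eax = (uint32_t)value;              /* low half  -> %eax */
            uint32_t edx = (uint32_t)(value >> 32);      /* high half -> %edx */
            uint64_t back = eax + ((uint64_t)edx << 32); /* as in xgetbv() */

            printf("eax=%#x edx=%#x recombined=%#llx\n",
                   eax, edx, (unsigned long long)back);
            return back == value ? 0 : 1;
    }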
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h index 8ded74720024..8151f5b8b6cb 100644 --- a/include/asm-x86/xen/events.h +++ b/include/asm-x86/xen/events.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __XEN_EVENTS_H | 1 | #ifndef ASM_X86__XEN__EVENTS_H |
2 | #define __XEN_EVENTS_H | 2 | #define ASM_X86__XEN__EVENTS_H |
3 | 3 | ||
4 | enum ipi_vector { | 4 | enum ipi_vector { |
5 | XEN_RESCHEDULE_VECTOR, | 5 | XEN_RESCHEDULE_VECTOR, |
@@ -21,4 +21,4 @@ static inline void xen_do_IRQ(int irq, struct pt_regs *regs) | |||
21 | do_IRQ(regs); | 21 | do_IRQ(regs); |
22 | } | 22 | } |
23 | 23 | ||
24 | #endif /* __XEN_EVENTS_H */ | 24 | #endif /* ASM_X86__XEN__EVENTS_H */ |
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h index 2444d4593a3b..c4baab4d2b68 100644 --- a/include/asm-x86/xen/grant_table.h +++ b/include/asm-x86/xen/grant_table.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __XEN_GRANT_TABLE_H | 1 | #ifndef ASM_X86__XEN__GRANT_TABLE_H |
2 | #define __XEN_GRANT_TABLE_H | 2 | #define ASM_X86__XEN__GRANT_TABLE_H |
3 | 3 | ||
4 | #define xen_alloc_vm_area(size) alloc_vm_area(size) | 4 | #define xen_alloc_vm_area(size) alloc_vm_area(size) |
5 | #define xen_free_vm_area(area) free_vm_area(area) | 5 | #define xen_free_vm_area(area) free_vm_area(area) |
6 | 6 | ||
7 | #endif /* __XEN_GRANT_TABLE_H */ | 7 | #endif /* ASM_X86__XEN__GRANT_TABLE_H */ |
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h index 91cb7fd5c123..44f4259bee3f 100644 --- a/include/asm-x86/xen/hypercall.h +++ b/include/asm-x86/xen/hypercall.h | |||
@@ -30,8 +30,8 @@ | |||
30 | * IN THE SOFTWARE. | 30 | * IN THE SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #ifndef __HYPERCALL_H__ | 33 | #ifndef ASM_X86__XEN__HYPERCALL_H |
34 | #define __HYPERCALL_H__ | 34 | #define ASM_X86__XEN__HYPERCALL_H |
35 | 35 | ||
36 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
@@ -524,4 +524,4 @@ MULTI_stack_switch(struct multicall_entry *mcl, | |||
524 | mcl->args[1] = esp; | 524 | mcl->args[1] = esp; |
525 | } | 525 | } |
526 | 526 | ||
527 | #endif /* __HYPERCALL_H__ */ | 527 | #endif /* ASM_X86__XEN__HYPERCALL_H */ |
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h index 04ee0610014a..0ef3a88b869d 100644 --- a/include/asm-x86/xen/hypervisor.h +++ b/include/asm-x86/xen/hypervisor.h | |||
@@ -30,8 +30,8 @@ | |||
30 | * IN THE SOFTWARE. | 30 | * IN THE SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #ifndef __HYPERVISOR_H__ | 33 | #ifndef ASM_X86__XEN__HYPERVISOR_H |
34 | #define __HYPERVISOR_H__ | 34 | #define ASM_X86__XEN__HYPERVISOR_H |
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
@@ -69,4 +69,4 @@ u64 jiffies_to_st(unsigned long jiffies); | |||
69 | 69 | ||
70 | #define is_running_on_xen() (xen_start_info ? 1 : 0) | 70 | #define is_running_on_xen() (xen_start_info ? 1 : 0) |
71 | 71 | ||
72 | #endif /* __HYPERVISOR_H__ */ | 72 | #endif /* ASM_X86__XEN__HYPERVISOR_H */ |
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h index 9d810f2538a2..d077bba96da9 100644 --- a/include/asm-x86/xen/interface.h +++ b/include/asm-x86/xen/interface.h | |||
@@ -6,8 +6,8 @@ | |||
6 | * Copyright (c) 2004, K A Fraser | 6 | * Copyright (c) 2004, K A Fraser |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef __ASM_X86_XEN_INTERFACE_H | 9 | #ifndef ASM_X86__XEN__INTERFACE_H |
10 | #define __ASM_X86_XEN_INTERFACE_H | 10 | #define ASM_X86__XEN__INTERFACE_H |
11 | 11 | ||
12 | #ifdef __XEN__ | 12 | #ifdef __XEN__ |
13 | #define __DEFINE_GUEST_HANDLE(name, type) \ | 13 | #define __DEFINE_GUEST_HANDLE(name, type) \ |
@@ -172,4 +172,4 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); | |||
172 | #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" | 172 | #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" |
173 | #endif | 173 | #endif |
174 | 174 | ||
175 | #endif /* __ASM_X86_XEN_INTERFACE_H */ | 175 | #endif /* ASM_X86__XEN__INTERFACE_H */ |
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h index d8ac41d5db86..08167e19fc66 100644 --- a/include/asm-x86/xen/interface_32.h +++ b/include/asm-x86/xen/interface_32.h | |||
@@ -6,8 +6,8 @@ | |||
6 | * Copyright (c) 2004, K A Fraser | 6 | * Copyright (c) 2004, K A Fraser |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef __ASM_X86_XEN_INTERFACE_32_H | 9 | #ifndef ASM_X86__XEN__INTERFACE_32_H |
10 | #define __ASM_X86_XEN_INTERFACE_32_H | 10 | #define ASM_X86__XEN__INTERFACE_32_H |
11 | 11 | ||
12 | 12 | ||
13 | /* | 13 | /* |
@@ -94,4 +94,4 @@ typedef struct xen_callback xen_callback_t; | |||
94 | #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) | 94 | #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) |
95 | #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) | 95 | #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) |
96 | 96 | ||
97 | #endif /* __ASM_X86_XEN_INTERFACE_32_H */ | 97 | #endif /* ASM_X86__XEN__INTERFACE_32_H */ |
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h index 842266ce96e6..046c0f1e01d4 100644 --- a/include/asm-x86/xen/interface_64.h +++ b/include/asm-x86/xen/interface_64.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_X86_XEN_INTERFACE_64_H | 1 | #ifndef ASM_X86__XEN__INTERFACE_64_H |
2 | #define __ASM_X86_XEN_INTERFACE_64_H | 2 | #define ASM_X86__XEN__INTERFACE_64_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * 64-bit segment selectors | 5 | * 64-bit segment selectors |
@@ -156,4 +156,4 @@ typedef unsigned long xen_callback_t; | |||
156 | #endif /* !__ASSEMBLY__ */ | 156 | #endif /* !__ASSEMBLY__ */ |
157 | 157 | ||
158 | 158 | ||
159 | #endif /* __ASM_X86_XEN_INTERFACE_64_H */ | 159 | #endif /* ASM_X86__XEN__INTERFACE_64_H */ |
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h index 7b3835d3b77d..c50185dccec1 100644 --- a/include/asm-x86/xen/page.h +++ b/include/asm-x86/xen/page.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __XEN_PAGE_H | 1 | #ifndef ASM_X86__XEN__PAGE_H |
2 | #define __XEN_PAGE_H | 2 | #define ASM_X86__XEN__PAGE_H |
3 | 3 | ||
4 | #include <linux/pfn.h> | 4 | #include <linux/pfn.h> |
5 | 5 | ||
@@ -162,4 +162,4 @@ xmaddr_t arbitrary_virt_to_machine(void *address); | |||
162 | void make_lowmem_page_readonly(void *vaddr); | 162 | void make_lowmem_page_readonly(void *vaddr); |
163 | void make_lowmem_page_readwrite(void *vaddr); | 163 | void make_lowmem_page_readwrite(void *vaddr); |
164 | 164 | ||
165 | #endif /* __XEN_PAGE_H */ | 165 | #endif /* ASM_X86__XEN__PAGE_H */ |
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h new file mode 100644 index 000000000000..08e9a1ac07a9 --- /dev/null +++ b/include/asm-x86/xsave.h | |||
@@ -0,0 +1,118 @@ | |||
1 | #ifndef __ASM_X86_XSAVE_H | ||
2 | #define __ASM_X86_XSAVE_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/processor.h> | ||
6 | #include <asm/i387.h> | ||
7 | |||
8 | #define XSTATE_FP 0x1 | ||
9 | #define XSTATE_SSE 0x2 | ||
10 | |||
11 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) | ||
12 | |||
13 | #define FXSAVE_SIZE 512 | ||
14 | |||
15 | /* | ||
16 | * These are the features that the OS can handle currently. | ||
17 | */ | ||
18 | #define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE) | ||
19 | |||
20 | #ifdef CONFIG_X86_64 | ||
21 | #define REX_PREFIX "0x48, " | ||
22 | #else | ||
23 | #define REX_PREFIX | ||
24 | #endif | ||
25 | |||
26 | extern unsigned int xstate_size; | ||
27 | extern u64 pcntxt_mask; | ||
28 | extern struct xsave_struct *init_xstate_buf; | ||
29 | |||
30 | extern void xsave_cntxt_init(void); | ||
31 | extern void xsave_init(void); | ||
32 | extern int init_fpu(struct task_struct *child); | ||
33 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
34 | void __user *fpstate, | ||
35 | struct _fpx_sw_bytes *sw); | ||
36 | |||
37 | static inline int xrstor_checking(struct xsave_struct *fx) | ||
38 | { | ||
39 | int err; | ||
40 | |||
41 | asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | ||
42 | "2:\n" | ||
43 | ".section .fixup,\"ax\"\n" | ||
44 | "3: movl $-1,%[err]\n" | ||
45 | " jmp 2b\n" | ||
46 | ".previous\n" | ||
47 | _ASM_EXTABLE(1b, 3b) | ||
48 | : [err] "=r" (err) | ||
49 | : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0) | ||
50 | : "memory"); | ||
51 | |||
52 | return err; | ||
53 | } | ||
54 | |||
55 | static inline int xsave_user(struct xsave_struct __user *buf) | ||
56 | { | ||
57 | int err; | ||
58 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" | ||
59 | "2:\n" | ||
60 | ".section .fixup,\"ax\"\n" | ||
61 | "3: movl $-1,%[err]\n" | ||
62 | " jmp 2b\n" | ||
63 | ".previous\n" | ||
64 | ".section __ex_table,\"a\"\n" | ||
65 | _ASM_ALIGN "\n" | ||
66 | _ASM_PTR "1b,3b\n" | ||
67 | ".previous" | ||
68 | : [err] "=r" (err) | ||
69 | : "D" (buf), "a" (-1), "d" (-1), "0" (0) | ||
70 | : "memory"); | ||
71 | if (unlikely(err) && __clear_user(buf, xstate_size)) | ||
72 | err = -EFAULT; | ||
73 | /* No need to clear here because the caller clears USED_MATH */ | ||
74 | return err; | ||
75 | } | ||
76 | |||
77 | static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) | ||
78 | { | ||
79 | int err; | ||
80 | struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); | ||
81 | u32 lmask = mask; | ||
82 | u32 hmask = mask >> 32; | ||
83 | |||
84 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" | ||
85 | "2:\n" | ||
86 | ".section .fixup,\"ax\"\n" | ||
87 | "3: movl $-1,%[err]\n" | ||
88 | " jmp 2b\n" | ||
89 | ".previous\n" | ||
90 | ".section __ex_table,\"a\"\n" | ||
91 | _ASM_ALIGN "\n" | ||
92 | _ASM_PTR "1b,3b\n" | ||
93 | ".previous" | ||
94 | : [err] "=r" (err) | ||
95 | : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) | ||
96 | : "memory"); /* memory required? */ | ||
97 | return err; | ||
98 | } | ||
99 | |||
100 | static inline void xrstor_state(struct xsave_struct *fx, u64 mask) | ||
101 | { | ||
102 | u32 lmask = mask; | ||
103 | u32 hmask = mask >> 32; | ||
104 | |||
105 | asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | ||
106 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
107 | : "memory"); | ||
108 | } | ||
109 | |||
110 | static inline void xsave(struct task_struct *tsk) | ||
111 | { | ||
112 | /* This, however, we can work around by forcing the compiler to select | ||
113 | an addressing mode that doesn't require extended registers. */ | ||
114 | __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" | ||
115 | : : "D" (&(tsk->thread.xstate->xsave)), | ||
116 | "a" (-1), "d"(-1) : "memory"); | ||
117 | } | ||
118 | #endif | ||
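The new xsave.h above follows the same convention: xrstor_checking(), xsave_user(), xrestore_user() and xrstor_state() all hand the 64-bit feature mask to the instruction in edx:eax (hmask/lmask), and the user-copy variants use an exception-table fixup to report a faulting save or restore as -1. XCNTXT_MASK records that only FP and SSE state are handled at this point; the hedged sketch below shows a hardware-reported feature set being clamped to that limit, with the hardware mask value invented for illustration.

    /* Illustrative only: clamping a hardware-reported xsave feature mask to
     * the features the header above declares supported (XCNTXT_MASK).
     * The hw_mask value is made up for the example. */
    #include <stdio.h>
    #include <stdint.h>

    #define XSTATE_FP       0x1
    #define XSTATE_SSE      0x2
    #define XCNTXT_MASK     (XSTATE_FP | XSTATE_SSE)

    int main(void)
    {
            uint64_t hw_mask = XSTATE_FP | XSTATE_SSE | (1ULL << 2); /* pretend future feature bit */
            uint64_t enabled = hw_mask & XCNTXT_MASK;                /* what the OS will manage */

            printf("hardware=%#llx enabled=%#llx\n",
                   (unsigned long long)hw_mask, (unsigned long long)enabled);
            return 0;
    }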
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 56c73b847551..c360c558e59e 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -25,9 +25,99 @@ | |||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | 27 | ||
28 | #ifdef CONFIG_DMAR | 28 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) |
29 | struct intel_iommu; | 29 | struct intel_iommu; |
30 | 30 | ||
31 | struct dmar_drhd_unit { | ||
32 | struct list_head list; /* list of drhd units */ | ||
33 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
34 | u64 reg_base_addr; /* register base address*/ | ||
35 | struct pci_dev **devices; /* target device array */ | ||
36 | int devices_cnt; /* target device count */ | ||
37 | u8 ignored:1; /* ignore drhd */ | ||
38 | u8 include_all:1; | ||
39 | struct intel_iommu *iommu; | ||
40 | }; | ||
41 | |||
42 | extern struct list_head dmar_drhd_units; | ||
43 | |||
44 | #define for_each_drhd_unit(drhd) \ | ||
45 | list_for_each_entry(drhd, &dmar_drhd_units, list) | ||
46 | |||
47 | extern int dmar_table_init(void); | ||
48 | extern int early_dmar_detect(void); | ||
49 | extern int dmar_dev_scope_init(void); | ||
50 | |||
51 | /* Intel IOMMU detection */ | ||
52 | extern void detect_intel_iommu(void); | ||
53 | |||
54 | |||
55 | extern int parse_ioapics_under_ir(void); | ||
56 | extern int alloc_iommu(struct dmar_drhd_unit *); | ||
57 | #else | ||
58 | static inline void detect_intel_iommu(void) | ||
59 | { | ||
60 | return; | ||
61 | } | ||
62 | |||
63 | static inline int dmar_table_init(void) | ||
64 | { | ||
65 | return -ENODEV; | ||
66 | } | ||
67 | #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ | ||
68 | |||
69 | #ifdef CONFIG_INTR_REMAP | ||
70 | extern int intr_remapping_enabled; | ||
71 | extern int enable_intr_remapping(int); | ||
72 | |||
73 | struct irte { | ||
74 | union { | ||
75 | struct { | ||
76 | __u64 present : 1, | ||
77 | fpd : 1, | ||
78 | dst_mode : 1, | ||
79 | redir_hint : 1, | ||
80 | trigger_mode : 1, | ||
81 | dlvry_mode : 3, | ||
82 | avail : 4, | ||
83 | __reserved_1 : 4, | ||
84 | vector : 8, | ||
85 | __reserved_2 : 8, | ||
86 | dest_id : 32; | ||
87 | }; | ||
88 | __u64 low; | ||
89 | }; | ||
90 | |||
91 | union { | ||
92 | struct { | ||
93 | __u64 sid : 16, | ||
94 | sq : 2, | ||
95 | svt : 2, | ||
96 | __reserved_3 : 44; | ||
97 | }; | ||
98 | __u64 high; | ||
99 | }; | ||
100 | }; | ||
101 | extern int get_irte(int irq, struct irte *entry); | ||
102 | extern int modify_irte(int irq, struct irte *irte_modified); | ||
103 | extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); | ||
104 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, | ||
105 | u16 sub_handle); | ||
106 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); | ||
107 | extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); | ||
108 | extern int flush_irte(int irq); | ||
109 | extern int free_irte(int irq); | ||
110 | |||
111 | extern int irq_remapped(int irq); | ||
112 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | ||
113 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | ||
114 | #else | ||
115 | #define irq_remapped(irq) (0) | ||
116 | #define enable_intr_remapping(mode) (-1) | ||
117 | #define intr_remapping_enabled (0) | ||
118 | #endif | ||
119 | |||
120 | #ifdef CONFIG_DMAR | ||
31 | extern const char *dmar_get_fault_reason(u8 fault_reason); | 121 | extern const char *dmar_get_fault_reason(u8 fault_reason); |
32 | 122 | ||
33 | /* Can't use the common MSI interrupt functions | 123 | /* Can't use the common MSI interrupt functions |
@@ -40,47 +130,30 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg); | |||
40 | extern int dmar_set_interrupt(struct intel_iommu *iommu); | 130 | extern int dmar_set_interrupt(struct intel_iommu *iommu); |
41 | extern int arch_setup_dmar_msi(unsigned int irq); | 131 | extern int arch_setup_dmar_msi(unsigned int irq); |
42 | 132 | ||
43 | /* Intel IOMMU detection and initialization functions */ | 133 | extern int iommu_detected, no_iommu; |
44 | extern void detect_intel_iommu(void); | ||
45 | extern int intel_iommu_init(void); | ||
46 | |||
47 | extern int dmar_table_init(void); | ||
48 | extern int early_dmar_detect(void); | ||
49 | |||
50 | extern struct list_head dmar_drhd_units; | ||
51 | extern struct list_head dmar_rmrr_units; | 134 | extern struct list_head dmar_rmrr_units; |
52 | |||
53 | struct dmar_drhd_unit { | ||
54 | struct list_head list; /* list of drhd units */ | ||
55 | u64 reg_base_addr; /* register base address*/ | ||
56 | struct pci_dev **devices; /* target device array */ | ||
57 | int devices_cnt; /* target device count */ | ||
58 | u8 ignored:1; /* ignore drhd */ | ||
59 | u8 include_all:1; | ||
60 | struct intel_iommu *iommu; | ||
61 | }; | ||
62 | |||
63 | struct dmar_rmrr_unit { | 135 | struct dmar_rmrr_unit { |
64 | struct list_head list; /* list of rmrr units */ | 136 | struct list_head list; /* list of rmrr units */ |
137 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
65 | u64 base_address; /* reserved base address*/ | 138 | u64 base_address; /* reserved base address*/ |
66 | u64 end_address; /* reserved end address */ | 139 | u64 end_address; /* reserved end address */ |
67 | struct pci_dev **devices; /* target devices */ | 140 | struct pci_dev **devices; /* target devices */ |
68 | int devices_cnt; /* target device count */ | 141 | int devices_cnt; /* target device count */ |
69 | }; | 142 | }; |
70 | 143 | ||
71 | #define for_each_drhd_unit(drhd) \ | ||
72 | list_for_each_entry(drhd, &dmar_drhd_units, list) | ||
73 | #define for_each_rmrr_units(rmrr) \ | 144 | #define for_each_rmrr_units(rmrr) \ |
74 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) | 145 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) |
146 | /* Intel DMAR initialization functions */ | ||
147 | extern int intel_iommu_init(void); | ||
148 | extern int dmar_disabled; | ||
75 | #else | 149 | #else |
76 | static inline void detect_intel_iommu(void) | ||
77 | { | ||
78 | return; | ||
79 | } | ||
80 | static inline int intel_iommu_init(void) | 150 | static inline int intel_iommu_init(void) |
81 | { | 151 | { |
152 | #ifdef CONFIG_INTR_REMAP | ||
153 | return dmar_dev_scope_init(); | ||
154 | #else | ||
82 | return -ENODEV; | 155 | return -ENODEV; |
156 | #endif | ||
83 | } | 157 | } |
84 | |||
85 | #endif /* !CONFIG_DMAR */ | 158 | #endif /* !CONFIG_DMAR */ |
86 | #endif /* __DMAR_H__ */ | 159 | #endif /* __DMAR_H__ */ |
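The irte structure introduced above describes a 128-bit interrupt remap table entry twice: once as named bitfields and once as the raw low/high 64-bit words, so callers can fill individual fields yet still program the hardware a word at a time. The standalone sketch below mirrors that overlay and shows the named fields landing in the packed words; every value in it is invented, and the exact bit placement depends on the compiler's bitfield layout, as it does for the kernel's __u64 bitfields.

    /* Illustrative only: a user-space copy of the irte bitfield/raw-word
     * overlay from the hunk above.  uint64_t bit-fields rely on the same
     * compiler extension the kernel uses for __u64.  Field values are
     * made up. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct irte_demo {
            union {
                    struct {
                            uint64_t present      : 1,
                                     fpd          : 1,
                                     dst_mode     : 1,
                                     redir_hint   : 1,
                                     trigger_mode : 1,
                                     dlvry_mode   : 3,
                                     avail        : 4,
                                     reserved_1   : 4,
                                     vector       : 8,
                                     reserved_2   : 8,
                                     dest_id      : 32;
                    };
                    uint64_t low;
            };
            union {
                    struct {
                            uint64_t sid          : 16,
                                     sq           : 2,
                                     svt          : 2,
                                     reserved_3   : 44;
                    };
                    uint64_t high;
            };
    };

    int main(void)
    {
            struct irte_demo e;

            memset(&e, 0, sizeof(e));
            e.present = 1;
            e.vector  = 0x31;       /* invented vector */
            e.dest_id = 4;          /* invented destination APIC ID */

            printf("low=%#llx high=%#llx\n",
                   (unsigned long long)e.low, (unsigned long long)e.high);
            return 0;
    }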
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 8d3b7a9afd17..fded376b94e3 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -108,6 +108,9 @@ extern struct resource iomem_resource; | |||
108 | 108 | ||
109 | extern int request_resource(struct resource *root, struct resource *new); | 109 | extern int request_resource(struct resource *root, struct resource *new); |
110 | extern int release_resource(struct resource *new); | 110 | extern int release_resource(struct resource *new); |
111 | extern void reserve_region_with_split(struct resource *root, | ||
112 | resource_size_t start, resource_size_t end, | ||
113 | const char *name); | ||
111 | extern int insert_resource(struct resource *parent, struct resource *new); | 114 | extern int insert_resource(struct resource *parent, struct resource *new); |
112 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); | 115 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); |
113 | extern int allocate_resource(struct resource *root, struct resource *new, | 116 | extern int allocate_resource(struct resource *root, struct resource *new, |
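reserve_region_with_split(), declared above and implemented in the kernel/resource.c hunk further below, reserves [start, end] under a root resource even when parts of that range are already claimed: rather than failing on the first conflict, it recurses into the pieces that are still free. The standalone model below (illustrative only, not the kernel code) shows the core split around one busy conflict; the recursion into non-busy conflicts and the resource tree itself are omitted.

    /* Illustrative model of the split performed by __reserve_region_with_split()
     * when a requested range collides with one already-busy resource. */
    #include <stdio.h>
    #include <stdint.h>

    static void reserve(uint64_t start, uint64_t end, const char *what)
    {
            printf("reserve %-14s [%#llx, %#llx]\n", what,
                   (unsigned long long)start, (unsigned long long)end);
    }

    static void split_around_conflict(uint64_t start, uint64_t end,
                                      uint64_t c_start, uint64_t c_end)
    {
            if (c_start <= start && c_end >= end)
                    return;                 /* conflict covers the whole request */
            if (c_start > start)
                    reserve(start, c_start - 1, "below conflict");
            if (c_end < end)
                    reserve(c_end + 1, end, "above conflict");
    }

    int main(void)
    {
            /* Request 0x1000-0x8fff while 0x4000-0x5fff is already busy. */
            split_around_conflict(0x1000, 0x8fff, 0x4000, 0x5fff);
            return 0;
    }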
diff --git a/include/linux/irq.h b/include/linux/irq.h index 8ccb462ea42c..8d9411bc60f6 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -62,6 +62,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
62 | #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ | 62 | #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ |
63 | #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ | 63 | #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ |
64 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ | 64 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ |
65 | #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ | ||
65 | 66 | ||
66 | #ifdef CONFIG_IRQ_PER_CPU | 67 | #ifdef CONFIG_IRQ_PER_CPU |
67 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 68 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index fac3337547eb..9f2a3751873a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -23,12 +23,19 @@ | |||
23 | __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ | 23 | __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ |
24 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ | 24 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ |
25 | ____cacheline_aligned_in_smp | 25 | ____cacheline_aligned_in_smp |
26 | |||
27 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
28 | __attribute__((__section__(".data.percpu.page_aligned"))) \ | ||
29 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
26 | #else | 30 | #else |
27 | #define DEFINE_PER_CPU(type, name) \ | 31 | #define DEFINE_PER_CPU(type, name) \ |
28 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | 32 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name |
29 | 33 | ||
30 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | 34 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ |
31 | DEFINE_PER_CPU(type, name) | 35 | DEFINE_PER_CPU(type, name) |
36 | |||
37 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
38 | DEFINE_PER_CPU(type, name) | ||
32 | #endif | 39 | #endif |
33 | 40 | ||
34 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | 41 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) |
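DEFINE_PER_CPU_PAGE_ALIGNED(), added above, places a per-CPU variable in the new .data.percpu.page_aligned section on SMP and falls back to a plain DEFINE_PER_CPU() otherwise; the dedicated section exists so that such definitions keep page alignment. The standalone sketch below only demonstrates what a page-aligned definition looks like for an ordinary object and is not the kernel macro.

    /* Illustrative only: a page-aligned definition in user space.  The kernel
     * macro additionally places the object in a dedicated per-CPU section. */
    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096

    struct demo_state {
            long counter;
    };

    /* analogous in spirit to DEFINE_PER_CPU_PAGE_ALIGNED(struct demo_state, st) */
    static struct demo_state st __attribute__((aligned(DEMO_PAGE_SIZE)));

    int main(void)
    {
            printf("st at %p, page aligned: %s\n", (void *)&st,
                   ((uintptr_t)&st % DEMO_PAGE_SIZE) == 0 ? "yes" : "no");
            return 0;
    }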
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0314074fa232..60c49e324390 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -89,7 +89,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
89 | set_balance_irq_affinity(irq, cpumask); | 89 | set_balance_irq_affinity(irq, cpumask); |
90 | 90 | ||
91 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 91 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
92 | set_pending_irq(irq, cpumask); | 92 | if (desc->status & IRQ_MOVE_PCNTXT) { |
93 | unsigned long flags; | ||
94 | |||
95 | spin_lock_irqsave(&desc->lock, flags); | ||
96 | desc->chip->set_affinity(irq, cpumask); | ||
97 | spin_unlock_irqrestore(&desc->lock, flags); | ||
98 | } else | ||
99 | set_pending_irq(irq, cpumask); | ||
93 | #else | 100 | #else |
94 | desc->affinity = cpumask; | 101 | desc->affinity = cpumask; |
95 | desc->chip->set_affinity(irq, cpumask); | 102 | desc->chip->set_affinity(irq, cpumask); |
diff --git a/kernel/resource.c b/kernel/resource.c index 03d796c1b2e9..414d6fc9131e 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -516,6 +516,74 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t | |||
516 | return result; | 516 | return result; |
517 | } | 517 | } |
518 | 518 | ||
519 | static void __init __reserve_region_with_split(struct resource *root, | ||
520 | resource_size_t start, resource_size_t end, | ||
521 | const char *name) | ||
522 | { | ||
523 | struct resource *parent = root; | ||
524 | struct resource *conflict; | ||
525 | struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); | ||
526 | |||
527 | if (!res) | ||
528 | return; | ||
529 | |||
530 | res->name = name; | ||
531 | res->start = start; | ||
532 | res->end = end; | ||
533 | res->flags = IORESOURCE_BUSY; | ||
534 | |||
535 | for (;;) { | ||
536 | conflict = __request_resource(parent, res); | ||
537 | if (!conflict) | ||
538 | break; | ||
539 | if (conflict != parent) { | ||
540 | parent = conflict; | ||
541 | if (!(conflict->flags & IORESOURCE_BUSY)) | ||
542 | continue; | ||
543 | } | ||
544 | |||
545 | /* Uhhuh, that didn't work out.. */ | ||
546 | kfree(res); | ||
547 | res = NULL; | ||
548 | break; | ||
549 | } | ||
550 | |||
551 | if (!res) { | ||
552 | printk(KERN_DEBUG " __reserve_region_with_split: (%s) [%llx, %llx], res: (%s) [%llx, %llx]\n", | ||
553 | conflict->name, conflict->start, conflict->end, | ||
554 | name, start, end); | ||
555 | |||
556 | /* failed, split and try again */ | ||
557 | |||
558 | /* conflict covered whole area */ | ||
559 | if (conflict->start <= start && conflict->end >= end) | ||
560 | return; | ||
561 | |||
562 | if (conflict->start > start) | ||
563 | __reserve_region_with_split(root, start, conflict->start-1, name); | ||
564 | if (!(conflict->flags & IORESOURCE_BUSY)) { | ||
565 | resource_size_t common_start, common_end; | ||
566 | |||
567 | common_start = max(conflict->start, start); | ||
568 | common_end = min(conflict->end, end); | ||
569 | if (common_start < common_end) | ||
570 | __reserve_region_with_split(root, common_start, common_end, name); | ||
571 | } | ||
572 | if (conflict->end < end) | ||
573 | __reserve_region_with_split(root, conflict->end+1, end, name); | ||
574 | } | ||
575 | |||
576 | } | ||
577 | |||
578 | void reserve_region_with_split(struct resource *root, | ||
579 | resource_size_t start, resource_size_t end, | ||
580 | const char *name) | ||
581 | { | ||
582 | write_lock(&resource_lock); | ||
583 | __reserve_region_with_split(root, start, end, name); | ||
584 | write_unlock(&resource_lock); | ||
585 | } | ||
586 | |||
519 | EXPORT_SYMBOL(adjust_resource); | 587 | EXPORT_SYMBOL(adjust_resource); |
520 | 588 | ||
521 | /** | 589 | /** |