 68 files changed, 3019 insertions(+), 2317 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 60a85768cfcb..f8843c3ae77d 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -419,6 +419,60 @@ config X86_DEBUGCTLMSR
 	def_bool y
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
 
+menuconfig PROCESSOR_SELECT
+	default y
+	bool "Supported processor vendors" if EMBEDDED
+	help
+	  This lets you choose what x86 vendor support code your kernel
+	  will include.
+
+config CPU_SUP_INTEL
+	default y
+	bool "Support Intel processors" if PROCESSOR_SELECT
+	help
+	  This enables extended support for Intel processors
+
+config CPU_SUP_CYRIX_32
+	default y
+	bool "Support Cyrix processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Cyrix processors
+
+config CPU_SUP_AMD
+	default y
+	bool "Support AMD processors" if PROCESSOR_SELECT
+	help
+	  This enables extended support for AMD processors
+
+config CPU_SUP_CENTAUR_32
+	default y
+	bool "Support Centaur processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Centaur processors
+
+config CPU_SUP_CENTAUR_64
+	default y
+	bool "Support Centaur processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for Centaur processors
+
+config CPU_SUP_TRANSMETA_32
+	default y
+	bool "Support Transmeta processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Transmeta processors
+
+config CPU_SUP_UMC_32
+	default y
+	bool "Support UMC processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for UMC processors
+
 config X86_DS
 	bool "Debug Store support"
 	default y
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 75298fe2edca..6ec6bb6e9957 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -59,17 +59,18 @@ int validate_cpu(void)
 		u32 e = err_flags[i];
 
 		for (j = 0; j < 32; j++) {
-			int n = (i << 5)+j;
-			if (*msg_strs < n) {
+			if (msg_strs[0] < i ||
+			    (msg_strs[0] == i && msg_strs[1] < j)) {
 				/* Skip to the next string */
-				do {
-					msg_strs++;
-				} while (*msg_strs);
-				msg_strs++;
+				msg_strs += 2;
+				while (*msg_strs++)
+					;
 			}
 			if (e & 1) {
-				if (*msg_strs == n && msg_strs[1])
-					printf("%s ", msg_strs+1);
+				if (msg_strs[0] == i &&
+				    msg_strs[1] == j &&
+				    msg_strs[2])
+					printf("%s ", msg_strs+2);
 				else
 					printf("%d:%d ", i, j);
 			}
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index bbe76953bae9..8ef60f20b371 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -15,33 +15,33 @@
 
 #include <stdio.h>
 
-#include "../kernel/cpu/feature_names.c"
-
-#if NCAPFLAGS > 8
-# error "Need to adjust the boot code handling of CPUID strings"
-#endif
+#include "../kernel/cpu/capflags.c"
 
 int main(void)
 {
-	int i;
+	int i, j;
 	const char *str;
 
 	printf("static const char x86_cap_strs[] = \n");
 
-	for (i = 0; i < NCAPINTS*32; i++) {
-		str = x86_cap_flags[i];
-
-		if (i == NCAPINTS*32-1) {
-			/* The last entry must be unconditional; this
-			   also consumes the compiler-added null character */
-			if (!str)
-				str = "";
-			printf("\t\"\\x%02x\"\"%s\"\n", i, str);
-		} else if (str) {
-			printf("#if REQUIRED_MASK%d & (1 << %d)\n"
-			       "\t\"\\x%02x\"\"%s\\0\"\n"
-			       "#endif\n",
-			       i >> 5, i & 31, i, str);
+	for (i = 0; i < NCAPINTS; i++) {
+		for (j = 0; j < 32; j++) {
+			str = x86_cap_flags[i*32+j];
+
+			if (i == NCAPINTS-1 && j == 31) {
+				/* The last entry must be unconditional; this
+				   also consumes the compiler-added null
+				   character */
+				if (!str)
+					str = "";
+				printf("\t\"\\x%02x\\x%02x\"\"%s\"\n",
+				       i, j, str);
+			} else if (str) {
+				printf("#if REQUIRED_MASK%d & (1 << %d)\n"
+				       "\t\"\\x%02x\\x%02x\"\"%s\\0\"\n"
+				       "#endif\n",
+				       i, j, i, j, str);
+			}
 		}
 	}
 	printf("\t;\n");
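
The two hunks above move the boot-time capability-string table from a one-byte to a two-byte index: mkcpustr.c now emits each x86_cap_strs entry as <word><bit><name>, and validate_cpu() walks the table comparing (word, bit) pairs instead of a single flat bit number. A minimal standalone sketch of the format and lookup — the table contents here are hand-written stand-ins, not the generated x86_cap_strs:

#include <stdio.h>

/*
 * Sketch of the two-byte-indexed string table: each entry is
 * <word index><bit index><NUL-terminated name>, sorted by (word, bit),
 * with the last entry unconditional.  The names below are invented
 * stand-ins for illustration.
 */
static const char cap_strs[] =
        "\x00\x00" "fpu\0"
        "\x00\x04" "tsc\0"
        "\x01\x1f" "3dnow";     /* last entry: unconditional */

static const char *cap_name(int word, int bit)
{
        const char *p = cap_strs;

        /*
         * Skip entries that sort before (word, bit); valid as long as
         * queries never go past the last entry, as in the boot code.
         */
        while (p[0] < word || (p[0] == word && p[1] < bit)) {
                p += 2;                 /* skip the two index bytes */
                while (*p++)            /* skip the name and its NUL */
                        ;
        }
        return (p[0] == word && p[1] == bit && p[2]) ? p + 2 : NULL;
}

int main(void)
{
        const char *name = cap_name(0, 4);

        printf("%s\n", name ? name : "0:4");    /* prints "tsc" */
        return 0;
}

Keeping the entries sorted by (word, bit) is what lets the skip loop advance the cursor monotonically rather than rescanning the table from the start for every error bit.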
diff --git a/arch/x86/es7000/Makefile b/arch/x86/es7000/Makefile
deleted file mode 100644
index 3ef8b43b62fc..000000000000
--- a/arch/x86/es7000/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-obj-$(CONFIG_X86_ES7000)	:= es7000plat.o
diff --git a/arch/x86/es7000/es7000.h b/arch/x86/es7000/es7000.h
deleted file mode 100644
index 4e62f6fa95b8..000000000000
--- a/arch/x86/es7000/es7000.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Written by: Garry Forsgren, Unisys Corporation
- *             Natalie Protasevich, Unisys Corporation
- * This file contains the code to configure and interface
- * with Unisys ES7000 series hardware system manager.
- *
- * Copyright (c) 2003 Unisys Corporation.  All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Unisys Corporation, Township Line & Union Meeting
- * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
- *
- * http://www.unisys.com
- */
-
-/*
- * ES7000 chipsets
- */
-
-#define NON_UNISYS		0
-#define ES7000_CLASSIC		1
-#define ES7000_ZORRO		2
-
-
-#define	MIP_REG			1
-#define	MIP_PSAI_REG		4
-
-#define	MIP_BUSY		1
-#define	MIP_SPIN		0xf0000
-#define	MIP_VALID		0x0100000000000000ULL
-#define	MIP_PORT(VALUE)	((VALUE >> 32) & 0xffff)
-
-#define	MIP_RD_LO(VALUE)	(VALUE & 0xffffffff)
-
-struct mip_reg_info {
-	unsigned long long mip_info;
-	unsigned long long delivery_info;
-	unsigned long long host_reg;
-	unsigned long long mip_reg;
-};
-
-struct part_info {
-	unsigned char type;
-	unsigned char length;
-	unsigned char part_id;
-	unsigned char apic_mode;
-	unsigned long snum;
-	char ptype[16];
-	char sname[64];
-	char pname[64];
-};
-
-struct psai {
-	unsigned long long entry_type;
-	unsigned long long addr;
-	unsigned long long bep_addr;
-};
-
-struct es7000_mem_info {
-	unsigned char type;
-	unsigned char length;
-	unsigned char resv[6];
-	unsigned long long start;
-	unsigned long long size;
-};
-
-struct es7000_oem_table {
-	unsigned long long hdr;
-	struct mip_reg_info mip;
-	struct part_info pif;
-	struct es7000_mem_info shm;
-	struct psai psai;
-};
-
-#ifdef CONFIG_ACPI
-
-struct oem_table {
-	struct acpi_table_header Header;
-	u32 OEMTableAddr;
-	u32 OEMTableSize;
-};
-
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-#endif
-
-struct mip_reg {
-	unsigned long long off_0;
-	unsigned long long off_8;
-	unsigned long long off_10;
-	unsigned long long off_18;
-	unsigned long long off_20;
-	unsigned long long off_28;
-	unsigned long long off_30;
-	unsigned long long off_38;
-};
-
-#define	MIP_SW_APIC		0x1020b
-#define	MIP_FUNC(VALUE)		(VALUE & 0xff)
-
-extern int parse_unisys_oem (char *oemptr);
-extern void setup_unisys(void);
-extern int es7000_start_cpu(int cpu, unsigned long eip);
-extern void es7000_sw_apic(void);
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index f1a2ac777faf..8d64c1bc8474 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -179,9 +179,10 @@ struct sigframe
 	u32 pretcode;
 	int sig;
 	struct sigcontext_ia32 sc;
-	struct _fpstate_ia32 fpstate;
+	struct _fpstate_ia32 fpstate_unused; /* look at kernel/sigframe.h */
 	unsigned int extramask[_COMPAT_NSIG_WORDS-1];
 	char retcode[8];
+	/* fp state follows here */
 };
 
 struct rt_sigframe
@@ -192,8 +193,8 @@ struct rt_sigframe
 	u32 puc;
 	compat_siginfo_t info;
 	struct ucontext_ia32 uc;
-	struct _fpstate_ia32 fpstate;
 	char retcode[8];
+	/* fp state follows here */
 };
 
 #define COPY(x)	{ \
@@ -215,7 +216,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 				   unsigned int *peax)
 {
 	unsigned int tmpflags, gs, oldgs, err = 0;
-	struct _fpstate_ia32 __user *buf;
+	void __user *buf;
 	u32 tmp;
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -259,26 +260,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 
 	err |= __get_user(tmp, &sc->fpstate);
 	buf = compat_ptr(tmp);
-	if (buf) {
-		if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-			goto badframe;
-		err |= restore_i387_ia32(buf);
-	} else {
-		struct task_struct *me = current;
-
-		if (used_math()) {
-			clear_fpu(me);
-			clear_used_math();
-		}
-	}
+	err |= restore_i387_xstate_ia32(buf);
 
 	err |= __get_user(tmp, &sc->ax);
 	*peax = tmp;
 
 	return err;
-
-badframe:
-	return 1;
 }
 
 asmlinkage long sys32_sigreturn(struct pt_regs *regs)
@@ -350,7 +337,7 @@ badframe:
  */
 
 static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
-				 struct _fpstate_ia32 __user *fpstate,
+				 void __user *fpstate,
 				 struct pt_regs *regs, unsigned int mask)
 {
 	int tmp, err = 0;
@@ -380,7 +367,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 	err |= __put_user((u32)regs->flags, &sc->flags);
 	err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
 
-	tmp = save_i387_ia32(fpstate);
+	tmp = save_i387_xstate_ia32(fpstate);
 	if (tmp < 0)
 		err = -EFAULT;
 	else {
@@ -401,7 +388,8 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
  * Determine which stack to use..
  */
 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-				 size_t frame_size)
+				 size_t frame_size,
+				 void **fpstate)
 {
 	unsigned long sp;
 
@@ -420,6 +408,11 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 	    ka->sa.sa_restorer)
 		sp = (unsigned long) ka->sa.sa_restorer;
 
+	if (used_math()) {
+		sp = sp - sig_xstate_ia32_size;
+		*fpstate = (struct _fpstate_ia32 *) sp;
+	}
+
 	sp -= frame_size;
 	/* Align the stack pointer according to the i386 ABI,
 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
@@ -433,6 +426,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 	struct sigframe __user *frame;
 	void __user *restorer;
 	int err = 0;
+	void __user *fpstate = NULL;
 
 	/* copy_to_user optimizes that into a single 8 byte store */
 	static const struct {
@@ -447,7 +441,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 		0,
 	};
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
 		goto give_sigsegv;
@@ -456,8 +450,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 	if (err)
 		goto give_sigsegv;
 
-	err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs,
-				     set->sig[0]);
+	err |= ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]);
 	if (err)
 		goto give_sigsegv;
 
@@ -521,6 +514,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	struct rt_sigframe __user *frame;
 	void __user *restorer;
 	int err = 0;
+	void __user *fpstate = NULL;
 
 	/* __copy_to_user optimizes that into a single 8 byte store */
 	static const struct {
@@ -536,7 +530,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		0,
 	};
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
 		goto give_sigsegv;
@@ -549,13 +543,16 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		goto give_sigsegv;
 
 	/* Create the ucontext.  */
-	err |= __put_user(0, &frame->uc.uc_flags);
+	if (cpu_has_xsave)
+		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
+	else
+		err |= __put_user(0, &frame->uc.uc_flags);
 	err |= __put_user(0, &frame->uc.uc_link);
 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
 	err |= __put_user(sas_ss_flags(regs->sp),
 			  &frame->uc.uc_stack.ss_flags);
 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
-				     regs, set->sig[0]);
+	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				     regs, set->sig[0]);
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
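
Net effect of the ia32_signal.c changes: the frame structs no longer embed a fixed _fpstate_ia32, and get_sigframe() reserves a variable-sized FP/extended-state area (sig_xstate_ia32_size bytes) just below the user stack pointer, with the fixed-size frame beneath it — so the saved state can grow with the CPU's XSAVE feature set. A rough sketch of the stack arithmetic with made-up sizes; sig_xstate_ia32_size and the frame size below are stand-ins:

#include <stdio.h>

int main(void)
{
        unsigned long sp = 0xffffd000;          /* hypothetical user stack pointer */
        unsigned long xstate_size = 832;        /* stand-in for sig_xstate_ia32_size */
        unsigned long frame_size = 216;         /* stand-in for sizeof(struct sigframe) */
        unsigned long fpstate, frame;

        /* variable-sized FP/extended-state area carved out first... */
        fpstate = sp - xstate_size;
        /* ...then the fixed-size frame below it */
        frame = fpstate - frame_size;
        /* i386 ABI alignment: on handler entry, (sp + 4) % 16 == 0 */
        frame = ((frame + 4) & -16ul) - 4;

        printf("fpstate at %#lx, frame at %#lx\n", fpstate, frame);
        return 0;
}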
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a07ec14f3312..c9be69fedb70 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -38,7 +38,7 @@ obj-y += tsc.o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
-obj-y				+= i387.o
+obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
 obj-y				+= ds.o
 obj-$(CONFIG_X86_32)		+= tls.o
@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
 obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
+obj-$(CONFIG_X86_ES7000)	+= es7000_32.o
 obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit_32.o
 obj-y				+= vsmp_64.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee76eaad3001..7f0b45a5d788 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,22 +3,30 @@
 #
 
 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
-obj-y			+= proc.o feature_names.o
+obj-y			+= proc.o capflags.o powerflags.o common.o
 
-obj-$(CONFIG_X86_32)	+= common.o bugs.o
-obj-$(CONFIG_X86_64)	+= common_64.o bugs_64.o
-obj-$(CONFIG_X86_32)	+= amd.o
-obj-$(CONFIG_X86_64)	+= amd_64.o
-obj-$(CONFIG_X86_32)	+= cyrix.o
-obj-$(CONFIG_X86_32)	+= centaur.o
-obj-$(CONFIG_X86_64)	+= centaur_64.o
-obj-$(CONFIG_X86_32)	+= transmeta.o
-obj-$(CONFIG_X86_32)	+= intel.o
-obj-$(CONFIG_X86_64)	+= intel_64.o
-obj-$(CONFIG_X86_32)	+= umc.o
+obj-$(CONFIG_X86_32)	+= bugs.o cmpxchg.o
+obj-$(CONFIG_X86_64)	+= bugs_64.o
+
+obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o
+obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
+obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
+obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
+obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_X86_MCE)	+= mcheck/
 obj-$(CONFIG_MTRR)	+= mtrr/
 obj-$(CONFIG_CPU_FREQ)	+= cpufreq/
 
 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
+
+quiet_cmd_mkcapflags = MKCAP   $@
+      cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
+
+cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h
+
+targets += capflags.c
+$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
+	$(call if_changed,mkcapflags)
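
The new rule generates capflags.c from cpufeature.h with mkcapflags.pl, replacing the hand-maintained feature_names.c. An illustrative stand-in for the shape of the generated table — the feature indices and NCAPINTS value below are examples for the sketch, not the generated output:

#include <stdio.h>

/*
 * Stand-in for the generated capflags.c: a sparse table of feature
 * names indexed by (word*32 + bit).  The values below are invented;
 * the real ones come from include/asm-x86/cpufeature.h.
 */
#define NCAPINTS 9
#define X86_FEATURE_FPU  (0*32 + 0)
#define X86_FEATURE_TSC  (0*32 + 4)
#define X86_FEATURE_XMM2 (0*32 + 26)

static const char *const x86_cap_flags[NCAPINTS * 32] = {
        [X86_FEATURE_FPU]  = "fpu",
        [X86_FEATURE_TSC]  = "tsc",
        [X86_FEATURE_XMM2] = "sse2",
};

int main(void)
{
        printf("%s\n", x86_cap_flags[X86_FEATURE_TSC]); /* prints "tsc" */
        return 0;
}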
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index a6ef672adbba..0d9c993aa93e 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,6 +7,8 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
+#include <mach_apic.h>
+
 struct cpuid_bit {
 	u16 feature;
 	u8 reg;
@@ -48,6 +50,92 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	}
 }
 
+/* leaf 0xb SMT level */
+#define SMT_LEVEL	0
+
+/* leaf 0xb sub-leaf types */
+#define INVALID_TYPE	0
+#define SMT_TYPE	1
+#define CORE_TYPE	2
+
+#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
+#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
+#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)
+
+/*
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
+ */
+void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int eax, ebx, ecx, edx, sub_index;
+	unsigned int ht_mask_width, core_plus_mask_width;
+	unsigned int core_select_mask, core_level_siblings;
+
+	if (c->cpuid_level < 0xb)
+		return;
+
+	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+
+	/*
+	 * check if the cpuid leaf 0xb is actually implemented.
+	 */
+	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
+		return;
+
+	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
+
+	/*
+	 * initial apic id, which also represents 32-bit extended x2apic id.
+	 */
+	c->initial_apicid = edx;
+
+	/*
+	 * Populate HT related information from sub-leaf level 0.
+	 */
+	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+
+	sub_index = 1;
+	do {
+		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
+
+		/*
+		 * Check for the Core type in the implemented sub leaves.
+		 */
+		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
+			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+			break;
+		}
+
+		sub_index++;
+	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+
+	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+
+#ifdef CONFIG_X86_32
+	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+						 & core_select_mask;
+	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+#else
+	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
+	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
+#endif
+	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+
+
+	printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
+	       c->phys_proc_id);
+	if (c->x86_max_cores > 1)
+		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
+	return;
+#endif
+}
+
 #ifdef CONFIG_X86_PAT
 void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
 {
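
detect_extended_topology() decomposes the 32-bit x2APIC ID using CPUID leaf 0xb: sub-leaf 0 gives the bit width of the SMT (thread) field, the CORE-type sub-leaf gives the combined SMT+core width, and core_select_mask extracts the core number once the SMT bits are shifted out. A worked example of the mask arithmetic, with made-up widths:

#include <stdio.h>

/*
 * Worked example of the leaf-0xb mask math above, with invented
 * values: 1 SMT bit per core, 5 bits total for SMT+core within a
 * package (up to 16 cores x 2 threads).
 */
int main(void)
{
        unsigned int apicid = 0x2b;             /* hypothetical x2APIC ID */
        unsigned int ht_mask_width = 1;         /* from sub-leaf 0 (SMT level) */
        unsigned int core_plus_mask_width = 5;  /* from the CORE-type sub-leaf */

        unsigned int core_select_mask =
                (~(-1 << core_plus_mask_width)) >> ht_mask_width;       /* 0x0f */

        unsigned int smt_id  = apicid & ((1 << ht_mask_width) - 1);
        unsigned int core_id = (apicid >> ht_mask_width) & core_select_mask;
        unsigned int pkg_id  = apicid >> core_plus_mask_width;

        /* 0x2b = 0b101011 -> smt 1, core 5, pkg 1 */
        printf("smt %u, core %u, pkg %u\n", smt_id, core_id, pkg_id);
        return 0;
}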
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 18514ed26104..32e73520adf7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,13 +1,22 @@
 #include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/mm.h>
+
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 
+#ifdef CONFIG_X86_64
+# include <asm/numa_64.h>
+# include <asm/mmconfig.h>
+# include <asm/cacheflush.h>
+#endif
+
 #include <mach_apic.h>
+
 #include "cpu.h"
 
+#ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  * misexecution of code under Linux. Owners of such processors should
@@ -24,26 +33,273 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 {
-	if (cpuid_eax(0x80000000) >= 0x80000007) {
-		c->x86_power = cpuid_edx(0x80000007);
-		if (c->x86_power & (1<<8))
-			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+	/*
+	 * General Systems BIOSen alias the cpu frequency registers
+	 * of the Elan at 0x000df000. Unfortuantly, one of the Linux
+	 * drivers subsequently pokes it, and changes the CPU speed.
+	 * Workaround : Remove the unneeded alias.
+	 */
+#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
+#define CBAR_ENB	(0x80000000)
+#define CBAR_KEY	(0X000000CB)
+	if (c->x86_model == 9 || c->x86_model == 10) {
+		if (inl (CBAR) & CBAR_ENB)
+			outl (0 | CBAR_KEY, CBAR);
 	}
-
-	/* Set MTRR capability flag if appropriate */
-	if (c->x86_model == 13 || c->x86_model == 9 ||
-	    (c->x86_model == 8 && c->x86_mask >= 8))
-		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+
+static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int mbytes = num_physpages >> (20-PAGE_SHIFT);
-	int r;
 
+	if (c->x86_model < 6) {
+		/* Based on AMD doc 20734R - June 2000 */
+		if (c->x86_model == 0) {
+			clear_cpu_cap(c, X86_FEATURE_APIC);
+			set_cpu_cap(c, X86_FEATURE_PGE);
+		}
+		return;
+	}
+
+	if (c->x86_model == 6 && c->x86_mask == 1) {
+		const int K6_BUG_LOOP = 1000000;
+		int n;
+		void (*f_vide)(void);
+		unsigned long d, d2;
+
+		printk(KERN_INFO "AMD K6 stepping B detected - ");
+
+		/*
+		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
+		 * calls at the same time.
+		 */
+
+		n = K6_BUG_LOOP;
+		f_vide = vide;
+		rdtscl(d);
+		while (n--)
+			f_vide();
+		rdtscl(d2);
+		d = d2-d;
+
+		if (d > 20*K6_BUG_LOOP)
+			printk("system stability may be impaired when more than 32 MB are used.\n");
+		else
+			printk("probably OK (after B9730xxxx).\n");
+		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
+	}
+
+	/* K6 with old style WHCR */
+	if (c->x86_model < 8 ||
+	   (c->x86_model == 8 && c->x86_mask < 8)) {
+		/* We can only write allocate on the low 508Mb */
+		if (mbytes > 508)
+			mbytes = 508;
+
+		rdmsr(MSR_K6_WHCR, l, h);
+		if ((l&0x0000FFFF) == 0) {
+			unsigned long flags;
+			l = (1<<0)|((mbytes/4)<<1);
+			local_irq_save(flags);
+			wbinvd();
+			wrmsr(MSR_K6_WHCR, l, h);
+			local_irq_restore(flags);
+			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+				mbytes);
+		}
+		return;
+	}
+
+	if ((c->x86_model == 8 && c->x86_mask > 7) ||
+	     c->x86_model == 9 || c->x86_model == 13) {
+		/* The more serious chips .. */
+
+		if (mbytes > 4092)
+			mbytes = 4092;
+
+		rdmsr(MSR_K6_WHCR, l, h);
+		if ((l&0xFFFF0000) == 0) {
+			unsigned long flags;
+			l = ((mbytes>>2)<<22)|(1<<16);
+			local_irq_save(flags);
+			wbinvd();
+			wrmsr(MSR_K6_WHCR, l, h);
+			local_irq_restore(flags);
+			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+				mbytes);
+		}
+
+		return;
+	}
+
+	if (c->x86_model == 10) {
+		/* AMD Geode LX is model 10 */
+		/* placeholder for any needed mods */
+		return;
+	}
+}
+
+static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+{
+	u32 l, h;
+
+	/*
+	 * Bit 15 of Athlon specific MSR 15, needs to be 0
+	 * to enable SSE on Palomino/Morgan/Barton CPU's.
+	 * If the BIOS didn't enable it already, enable it here.
+	 */
+	if (c->x86_model >= 6 && c->x86_model <= 10) {
+		if (!cpu_has(c, X86_FEATURE_XMM)) {
+			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+			rdmsr(MSR_K7_HWCR, l, h);
+			l &= ~0x00008000;
+			wrmsr(MSR_K7_HWCR, l, h);
+			set_cpu_cap(c, X86_FEATURE_XMM);
+		}
+	}
+
+	/*
+	 * It's been determined by AMD that Athlons since model 8 stepping 1
+	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+	 * As per AMD technical note 27212 0.2
+	 */
+	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+		rdmsr(MSR_K7_CLK_CTL, l, h);
+		if ((l & 0xfff00000) != 0x20000000) {
+			printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
+				((l & 0x000fffff)|0x20000000));
+			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
+		}
+	}
+
+	set_cpu_cap(c, X86_FEATURE_K7);
+}
+#endif
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+static int __cpuinit nearby_node(int apicid)
+{
+	int i, node;
+
+	for (i = apicid - 1; i >= 0; i--) {
+		node = apicid_to_node[i];
+		if (node != NUMA_NO_NODE && node_online(node))
+			return node;
+	}
+	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+		node = apicid_to_node[i];
+		if (node != NUMA_NO_NODE && node_online(node))
+			return node;
+	}
+	return first_node(node_online_map); /* Shouldn't happen */
+}
+#endif
+
+/*
+ * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
+ * Assumes number of cores is a power of two.
+ */
+static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_HT
+	unsigned bits;
+
+	bits = c->x86_coreid_bits;
+
+	/* Low order bits define the core id (index of core in socket) */
+	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+	/* Convert the initial APIC ID into the socket ID */
+	c->phys_proc_id = c->initial_apicid >> bits;
+#endif
+}
+
+static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+	int cpu = smp_processor_id();
+	int node;
+	unsigned apicid = hard_smp_processor_id();
+
+	node = c->phys_proc_id;
+	if (apicid_to_node[apicid] != NUMA_NO_NODE)
+		node = apicid_to_node[apicid];
+	if (!node_online(node)) {
+		/* Two possibilities here:
+		   - The CPU is missing memory and no node was created.
+		     In that case try picking one from a nearby CPU
+		   - The APIC IDs differ from the HyperTransport node IDs
+		     which the K8 northbridge parsing fills in.
+		     Assume they are all increased by a constant offset,
+		     but in the same order as the HT nodeids.
+		     If that doesn't result in a usable node fall back to the
+		     path for the previous case. */
+
+		int ht_nodeid = c->initial_apicid;
+
+		if (ht_nodeid >= 0 &&
+		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = apicid_to_node[ht_nodeid];
+		/* Pick a nearby node */
+		if (!node_online(node))
+			node = nearby_node(apicid);
+	}
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
+
+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_HT
+	unsigned bits, ecx;
+
+	/* Multi core CPU? */
+	if (c->extended_cpuid_level < 0x80000008)
+		return;
+
+	ecx = cpuid_ecx(0x80000008);
+
+	c->x86_max_cores = (ecx & 0xff) + 1;
+
+	/* CPU telling us the core id bits shift? */
+	bits = (ecx >> 12) & 0xF;
+
+	/* Otherwise recompute */
+	if (bits == 0) {
+		while ((1 << bits) < c->x86_max_cores)
+			bits++;
+	}
+
+	c->x86_coreid_bits = bits;
+#endif
+}
+
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+	early_init_amd_mc(c);
+
+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+	if (c->x86_power & (1<<8))
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
+#else
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86 == 5)
+		if (c->x86_model == 13 || c->x86_model == 9 ||
+		    (c->x86_model == 8 && c->x86_mask >= 8))
+			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
+#endif
+}
+
+static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_SMP
 	unsigned long long value;
 
@@ -54,7 +310,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
-	if (c->x86 == 15) {
+	if (c->x86 == 0xf) {
 		rdmsrl(MSR_K7_HWCR, value);
 		value |= 1 << 6;
 		wrmsrl(MSR_K7_HWCR, value);
@@ -64,209 +320,119 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	early_init_amd(c);
 
 	/*
-	 * FIXME: We should handle the K5 here. Set up the write
-	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
-	 * no bus pipeline)
-	 */
-
-	/*
 	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
 	 */
 	clear_cpu_cap(c, 0*32+31);
 
-	r = get_model_name(c);
+#ifdef CONFIG_X86_64
+	/* On C+ stepping K8 rep microcode works well for copy/memset */
+	if (c->x86 == 0xf) {
+		u32 level;
 
-	switch (c->x86) {
-	case 4:
-		/*
-		 * General Systems BIOSen alias the cpu frequency registers
-		 * of the Elan at 0x000df000. Unfortuantly, one of the Linux
-		 * drivers subsequently pokes it, and changes the CPU speed.
-		 * Workaround : Remove the unneeded alias.
-		 */
-#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
-#define CBAR_ENB	(0x80000000)
-#define CBAR_KEY	(0X000000CB)
-		if (c->x86_model == 9 || c->x86_model == 10) {
-			if (inl (CBAR) & CBAR_ENB)
-				outl (0 | CBAR_KEY, CBAR);
-		}
-		break;
-	case 5:
-		if (c->x86_model < 6) {
-			/* Based on AMD doc 20734R - June 2000 */
-			if (c->x86_model == 0) {
-				clear_cpu_cap(c, X86_FEATURE_APIC);
-				set_cpu_cap(c, X86_FEATURE_PGE);
-			}
-			break;
-		}
-
-		if (c->x86_model == 6 && c->x86_mask == 1) {
-			const int K6_BUG_LOOP = 1000000;
-			int n;
-			void (*f_vide)(void);
-			unsigned long d, d2;
-
-			printk(KERN_INFO "AMD K6 stepping B detected - ");
-
-			/*
-			 * It looks like AMD fixed the 2.6.2 bug and improved indirect
-			 * calls at the same time.
-			 */
-
-			n = K6_BUG_LOOP;
-			f_vide = vide;
-			rdtscl(d);
-			while (n--)
-				f_vide();
-			rdtscl(d2);
-			d = d2-d;
-
-			if (d > 20*K6_BUG_LOOP)
-				printk("system stability may be impaired when more than 32 MB are used.\n");
-			else
-				printk("probably OK (after B9730xxxx).\n");
-			printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
-		}
-
-		/* K6 with old style WHCR */
-		if (c->x86_model < 8 ||
-		   (c->x86_model == 8 && c->x86_mask < 8)) {
-			/* We can only write allocate on the low 508Mb */
-			if (mbytes > 508)
-				mbytes = 508;
-
-			rdmsr(MSR_K6_WHCR, l, h);
-			if ((l&0x0000FFFF) == 0) {
-				unsigned long flags;
-				l = (1<<0)|((mbytes/4)<<1);
-				local_irq_save(flags);
-				wbinvd();
-				wrmsr(MSR_K6_WHCR, l, h);
-				local_irq_restore(flags);
-				printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
-					mbytes);
-			}
-			break;
-		}
-
-		if ((c->x86_model == 8 && c->x86_mask > 7) ||
-		     c->x86_model == 9 || c->x86_model == 13) {
-			/* The more serious chips .. */
-
-			if (mbytes > 4092)
-				mbytes = 4092;
-
-			rdmsr(MSR_K6_WHCR, l, h);
-			if ((l&0xFFFF0000) == 0) {
-				unsigned long flags;
-				l = ((mbytes>>2)<<22)|(1<<16);
-				local_irq_save(flags);
-				wbinvd();
-				wrmsr(MSR_K6_WHCR, l, h);
-				local_irq_restore(flags);
-				printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
-					mbytes);
-			}
-
-			break;
-		}
-
-		if (c->x86_model == 10) {
-			/* AMD Geode LX is model 10 */
-			/* placeholder for any needed mods */
-			break;
-		}
-		break;
-	case 6: /* An Athlon/Duron */
-
-		/*
-		 * Bit 15 of Athlon specific MSR 15, needs to be 0
-		 * to enable SSE on Palomino/Morgan/Barton CPU's.
-		 * If the BIOS didn't enable it already, enable it here.
-		 */
-		if (c->x86_model >= 6 && c->x86_model <= 10) {
-			if (!cpu_has(c, X86_FEATURE_XMM)) {
-				printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-				rdmsr(MSR_K7_HWCR, l, h);
-				l &= ~0x00008000;
-				wrmsr(MSR_K7_HWCR, l, h);
-				set_cpu_cap(c, X86_FEATURE_XMM);
-			}
-		}
-
-		/*
-		 * It's been determined by AMD that Athlons since model 8 stepping 1
-		 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
-		 * As per AMD technical note 27212 0.2
-		 */
-		if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
-			rdmsr(MSR_K7_CLK_CTL, l, h);
-			if ((l & 0xfff00000) != 0x20000000) {
-				printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-					((l & 0x000fffff)|0x20000000));
-				wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
-			}
-		}
-		break;
+		level = cpuid_eax(1);
+		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	}
+	if (c->x86 == 0x10 || c->x86 == 0x11)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
+
+	/*
+	 * FIXME: We should handle the K5 here. Set up the write
+	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
+	 * no bus pipeline)
+	 */
 
 	switch (c->x86) {
-	case 15:
-		/* Use K8 tuning for Fam10h and Fam11h */
-	case 0x10:
-	case 0x11:
-		set_cpu_cap(c, X86_FEATURE_K8);
+	case 4:
+		init_amd_k5(c);
 		break;
-	case 6:
-		set_cpu_cap(c, X86_FEATURE_K7);
+	case 5:
+		init_amd_k6(c);
+		break;
+	case 6: /* An Athlon/Duron */
+		init_amd_k7(c);
 		break;
 	}
+
+	/* K6s reports MCEs but don't actually have all the MSRs */
+	if (c->x86 < 6)
+		clear_cpu_cap(c, X86_FEATURE_MCE);
+#endif
+
+	/* Enable workaround for FXSAVE leak */
 	if (c->x86 >= 6)
 		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
-	display_cacheinfo(c);
-
-	if (cpuid_eax(0x80000000) >= 0x80000008)
-		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+	if (!c->x86_model_id[0]) {
+		switch (c->x86) {
+		case 0xf:
+			/* Should distinguish Models here, but this is only
+			   a fallback anyways. */
+			strcpy(c->x86_model_id, "Hammer");
+			break;
+		}
+	}
 
-#ifdef CONFIG_X86_HT
-	/*
-	 * On a AMD multi core setup the lower bits of the APIC id
-	 * distinguish the cores.
-	 */
-	if (c->x86_max_cores > 1) {
-		int cpu = smp_processor_id();
-		unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+	display_cacheinfo(c);
 
-		if (bits == 0) {
-			while ((1 << bits) < c->x86_max_cores)
-				bits++;
-		}
-		c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
-		c->phys_proc_id >>= bits;
-		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
-		       cpu, c->x86_max_cores, c->cpu_core_id);
+	/* Multi core CPU? */
+	if (c->extended_cpuid_level >= 0x80000008) {
+		amd_detect_cmp(c);
+		srat_detect_node(c);
 	}
+
+#ifdef CONFIG_X86_32
+	detect_ht(c);
 #endif
 
-	if (cpuid_eax(0x80000000) >= 0x80000006) {
-		if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
+	if (c->extended_cpuid_level >= 0x80000006) {
+		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
 			num_cache_leaves = 4;
 		else
 			num_cache_leaves = 3;
 	}
 
-	/* K6s reports MCEs but don't actually have all the MSRs */
-	if (c->x86 < 6)
-		clear_cpu_cap(c, X86_FEATURE_MCE);
+	if (c->x86 >= 0xf && c->x86 <= 0x11)
+		set_cpu_cap(c, X86_FEATURE_K8);
 
-	if (cpu_has_xmm2)
+	if (cpu_has_xmm2) {
+		/* MFENCE stops RDTSC speculation */
 		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+	}
+
+#ifdef CONFIG_X86_64
+	if (c->x86 == 0x10) {
+		/* do this for boot cpu */
+		if (c == &boot_cpu_data)
+			check_enable_amd_mmconf_dmi();
+
+		fam10h_check_enable_mmcfg();
+	}
+
+	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+		unsigned long long tseg;
+
+		/*
+		 * Split up direct mapping around the TSEG SMM area.
+		 * Don't do it for gbpages because there seems very little
+		 * benefit in doing so.
+		 */
+		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+			if ((tseg>>PMD_SHIFT) <
+				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
+				((tseg>>PMD_SHIFT) <
+				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
+				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+				set_memory_4k((unsigned long)__va(tseg), 1);
+		}
+	}
+#endif
 }
 
+#ifdef CONFIG_X86_32
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
@@ -279,10 +445,12 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 	}
 	return size;
 }
+#endif
 
 static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
 		  {
@@ -295,9 +463,11 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 		  }
 		},
 	},
+	.c_size_cache	= amd_size_cache,
+#endif
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
-	.c_size_cache	= amd_size_cache,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
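
The TSEG block added at the end of init_amd() works in 2MB-page (PMD) indices: the SMM TSEG base gets its surrounding large page split into 4k pages only if it falls inside an already direct-mapped range. A worked example of that comparison with made-up values — tseg, max_low_pfn_mapped and max_pfn_mapped below are stand-ins:

#include <stdio.h>

int main(void)
{
        const int PMD_SHIFT = 21, PAGE_SHIFT = 12;      /* 2MB pages, 4k pages */
        unsigned long long tseg = 0xcff00000ULL;        /* hypothetical TSEG base */
        unsigned long max_low_pfn_mapped = 0xd0000;     /* mapped below 4G (pfn) */
        unsigned long max_pfn_mapped = 0x120000;        /* mapped total (pfn) */

        /* same test as the patch: is tseg's 2MB page inside the mapping? */
        int split =
            (tseg >> PMD_SHIFT) < (max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) ||
            ((tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) &&
             (tseg >> PMD_SHIFT) >= (1ULL << (32 - PMD_SHIFT)));

        printf("split 2MB page around tseg: %s\n", split ? "yes" : "no");
        return 0;
}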
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c deleted file mode 100644 index d1692b2a41ff..000000000000 --- a/arch/x86/kernel/cpu/amd_64.c +++ /dev/null | |||
| @@ -1,224 +0,0 @@ | |||
| 1 | #include <linux/init.h> | ||
| 2 | #include <linux/mm.h> | ||
| 3 | |||
| 4 | #include <asm/numa_64.h> | ||
| 5 | #include <asm/mmconfig.h> | ||
| 6 | #include <asm/cacheflush.h> | ||
| 7 | |||
| 8 | #include <mach_apic.h> | ||
| 9 | |||
| 10 | #include "cpu.h" | ||
| 11 | |||
| 12 | int force_mwait __cpuinitdata; | ||
| 13 | |||
| 14 | #ifdef CONFIG_NUMA | ||
| 15 | static int __cpuinit nearby_node(int apicid) | ||
| 16 | { | ||
| 17 | int i, node; | ||
| 18 | |||
| 19 | for (i = apicid - 1; i >= 0; i--) { | ||
| 20 | node = apicid_to_node[i]; | ||
| 21 | if (node != NUMA_NO_NODE && node_online(node)) | ||
| 22 | return node; | ||
| 23 | } | ||
| 24 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { | ||
| 25 | node = apicid_to_node[i]; | ||
| 26 | if (node != NUMA_NO_NODE && node_online(node)) | ||
| 27 | return node; | ||
| 28 | } | ||
| 29 | return first_node(node_online_map); /* Shouldn't happen */ | ||
| 30 | } | ||
| 31 | #endif | ||
| 32 | |||
| 33 | /* | ||
| 34 | * On a AMD dual core setup the lower bits of the APIC id distingush the cores. | ||
| 35 | * Assumes number of cores is a power of two. | ||
| 36 | */ | ||
| 37 | static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | ||
| 38 | { | ||
| 39 | #ifdef CONFIG_SMP | ||
| 40 | unsigned bits; | ||
| 41 | #ifdef CONFIG_NUMA | ||
| 42 | int cpu = smp_processor_id(); | ||
| 43 | int node = 0; | ||
| 44 | unsigned apicid = hard_smp_processor_id(); | ||
| 45 | #endif | ||
| 46 | bits = c->x86_coreid_bits; | ||
| 47 | |||
| 48 | /* Low order bits define the core id (index of core in socket) */ | ||
| 49 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); | ||
| 50 | /* Convert the initial APIC ID into the socket ID */ | ||
| 51 | c->phys_proc_id = c->initial_apicid >> bits; | ||
| 52 | |||
| 53 | #ifdef CONFIG_NUMA | ||
| 54 | node = c->phys_proc_id; | ||
| 55 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | ||
| 56 | node = apicid_to_node[apicid]; | ||
| 57 | if (!node_online(node)) { | ||
| 58 | /* Two possibilities here: | ||
| 59 | - The CPU is missing memory and no node was created. | ||
| 60 | In that case try picking one from a nearby CPU | ||
| 61 | - The APIC IDs differ from the HyperTransport node IDs | ||
| 62 | which the K8 northbridge parsing fills in. | ||
| 63 | Assume they are all increased by a constant offset, | ||
| 64 | but in the same order as the HT nodeids. | ||
| 65 | If that doesn't result in a usable node fall back to the | ||
| 66 | path for the previous case. */ | ||
| 67 | |||
| 68 | int ht_nodeid = c->initial_apicid; | ||
| 69 | |||
| 70 | if (ht_nodeid >= 0 && | ||
| 71 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | ||
| 72 | node = apicid_to_node[ht_nodeid]; | ||
| 73 | /* Pick a nearby node */ | ||
| 74 | if (!node_online(node)) | ||
| 75 | node = nearby_node(apicid); | ||
| 76 | } | ||
| 77 | numa_set_node(cpu, node); | ||
| 78 | |||
| 79 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
| 80 | #endif | ||
| 81 | #endif | ||
| 82 | } | ||
| 83 | |||
| 84 | static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | ||
| 85 | { | ||
| 86 | #ifdef CONFIG_SMP | ||
| 87 | unsigned bits, ecx; | ||
| 88 | |||
| 89 | /* Multi core CPU? */ | ||
| 90 | if (c->extended_cpuid_level < 0x80000008) | ||
| 91 | return; | ||
| 92 | |||
| 93 | ecx = cpuid_ecx(0x80000008); | ||
| 94 | |||
| 95 | c->x86_max_cores = (ecx & 0xff) + 1; | ||
| 96 | |||
| 97 | /* CPU telling us the core id bits shift? */ | ||
| 98 | bits = (ecx >> 12) & 0xF; | ||
| 99 | |||
| 100 | /* Otherwise recompute */ | ||
| 101 | if (bits == 0) { | ||
| 102 | while ((1 << bits) < c->x86_max_cores) | ||
| 103 | bits++; | ||
| 104 | } | ||
| 105 | |||
| 106 | c->x86_coreid_bits = bits; | ||
| 107 | |||
| 108 | #endif | ||
| 109 | } | ||
| 110 | |||
| 111 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | ||
| 112 | { | ||
| 113 | early_init_amd_mc(c); | ||
| 114 | |||
| 115 | /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ | ||
| 116 | if (c->x86_power & (1<<8)) | ||
| 117 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
| 118 | |||
| 119 | set_cpu_cap(c, X86_FEATURE_SYSCALL32); | ||
| 120 | } | ||
| 121 | |||
| 122 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | ||
| 123 | { | ||
| 124 | unsigned level; | ||
| 125 | |||
| 126 | #ifdef CONFIG_SMP | ||
| 127 | unsigned long value; | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Disable TLB flush filter by setting HWCR.FFDIS on K8 | ||
| 131 | * bit 6 of msr C001_0015 | ||
| 132 | * | ||
| 133 | * Errata 63 for SH-B3 steppings | ||
| 134 | * Errata 122 for all steppings (F+ have it disabled by default) | ||
| 135 | */ | ||
| 136 | if (c->x86 == 0xf) { | ||
| 137 | rdmsrl(MSR_K8_HWCR, value); | ||
| 138 | value |= 1 << 6; | ||
| 139 | wrmsrl(MSR_K8_HWCR, value); | ||
| 140 | } | ||
| 141 | #endif | ||
| 142 | |||
| 143 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | ||
| 144 | 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */ | ||
| 145 | clear_cpu_cap(c, 0*32+31); | ||
| 146 | |||
| 147 | /* On C+ stepping K8 rep microcode works well for copy/memset */ | ||
| 148 | if (c->x86 == 0xf) { | ||
| 149 | level = cpuid_eax(1); | ||
| 150 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | ||
| 151 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
| 152 | } | ||
| 153 | if (c->x86 == 0x10 || c->x86 == 0x11) | ||
| 154 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
| 155 | |||
| 156 | /* Enable workaround for FXSAVE leak */ | ||
| 157 | if (c->x86 >= 6) | ||
| 158 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); | ||
| 159 | |||
| 160 | level = get_model_name(c); | ||
| 161 | if (!level) { | ||
| 162 | switch (c->x86) { | ||
| 163 | case 0xf: | ||
| 164 | /* Should distinguish models here, but this is only | ||
| 165 | a fallback anyway. */ | ||
| 166 | strcpy(c->x86_model_id, "Hammer"); | ||
| 167 | break; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | display_cacheinfo(c); | ||
| 171 | |||
| 172 | /* Multi core CPU? */ | ||
| 173 | if (c->extended_cpuid_level >= 0x80000008) | ||
| 174 | amd_detect_cmp(c); | ||
| 175 | |||
| 176 | if (c->extended_cpuid_level >= 0x80000006 && | ||
| 177 | (cpuid_edx(0x80000006) & 0xf000)) | ||
| 178 | num_cache_leaves = 4; | ||
| 179 | else | ||
| 180 | num_cache_leaves = 3; | ||
| 181 | |||
| 182 | if (c->x86 >= 0xf && c->x86 <= 0x11) | ||
| 183 | set_cpu_cap(c, X86_FEATURE_K8); | ||
| 184 | |||
| 185 | /* MFENCE stops RDTSC speculation */ | ||
| 186 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | ||
| 187 | |||
| 188 | if (c->x86 == 0x10) { | ||
| 189 | /* do this for boot cpu */ | ||
| 190 | if (c == &boot_cpu_data) | ||
| 191 | check_enable_amd_mmconf_dmi(); | ||
| 192 | |||
| 193 | fam10h_check_enable_mmcfg(); | ||
| 194 | } | ||
| 195 | |||
| 196 | if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) { | ||
| 197 | unsigned long long tseg; | ||
| 198 | |||
| 199 | /* | ||
| 200 | * Split up direct mapping around the TSEG SMM area. | ||
| 201 | * Don't do it for gbpages because there seems to be very little | ||
| 202 | * benefit in doing so. | ||
| 203 | */ | ||
| 204 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | ||
| 205 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | ||
| 206 | if ((tseg>>PMD_SHIFT) < | ||
| 207 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || | ||
| 208 | ((tseg>>PMD_SHIFT) < | ||
| 209 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && | ||
| 210 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) | ||
| 211 | set_memory_4k((unsigned long)__va(tseg), 1); | ||
| 212 | } | ||
| 213 | } | ||
| 214 | } | ||
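A worked example may help with the TSEG comparison above (this covers only the first clause, and the constants are assumed, not taken from the patch): shifting an address right by PMD_SHIFT yields its large-page index, and shifting a pfn right by (PMD_SHIFT - PAGE_SHIFT) converts it to the same units, so the test asks whether the SMM region falls inside the direct mapping:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21	/* 2 MiB large pages, as on x86-64 */

	int main(void)
	{
		unsigned long long tseg = 0xcff00000ULL;	/* hypothetical SMM base */
		unsigned long long max_low_pfn_mapped = 0xd0000; /* assumed, ~3.25 GiB */

		/* First clause of the init_amd() check: TSEG below the low mapping? */
		if ((tseg >> PMD_SHIFT) <
		    (max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)))
			printf("split the 2 MiB mapping around %#llx to 4 KiB\n", tseg);
		return 0;
	}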
| 215 | |||
| 216 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { | ||
| 217 | .c_vendor = "AMD", | ||
| 218 | .c_ident = { "AuthenticAMD" }, | ||
| 219 | .c_early_init = early_init_amd, | ||
| 220 | .c_init = init_amd, | ||
| 221 | }; | ||
| 222 | |||
| 223 | cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); | ||
| 224 | |||
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index a0534c04d38a..89bfdd9cacc6 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
| @@ -289,7 +289,6 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c) | |||
| 289 | if (c->x86_model >= 6 && c->x86_model < 9) | 289 | if (c->x86_model >= 6 && c->x86_model < 9) |
| 290 | set_cpu_cap(c, X86_FEATURE_3DNOW); | 290 | set_cpu_cap(c, X86_FEATURE_3DNOW); |
| 291 | 291 | ||
| 292 | get_model_name(c); | ||
| 293 | display_cacheinfo(c); | 292 | display_cacheinfo(c); |
| 294 | } | 293 | } |
| 295 | 294 | ||
| @@ -475,6 +474,7 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | |||
| 475 | .c_early_init = early_init_centaur, | 474 | .c_early_init = early_init_centaur, |
| 476 | .c_init = init_centaur, | 475 | .c_init = init_centaur, |
| 477 | .c_size_cache = centaur_size_cache, | 476 | .c_size_cache = centaur_size_cache, |
| 477 | .c_x86_vendor = X86_VENDOR_CENTAUR, | ||
| 478 | }; | 478 | }; |
| 479 | 479 | ||
| 480 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); | 480 | cpu_dev_register(centaur_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c index 1d181c40e2e1..a1625f5a1e78 100644 --- a/arch/x86/kernel/cpu/centaur_64.c +++ b/arch/x86/kernel/cpu/centaur_64.c | |||
| @@ -16,9 +16,10 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | |||
| 16 | 16 | ||
| 17 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 17 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
| 18 | { | 18 | { |
| 19 | early_init_centaur(c); | ||
| 20 | |||
| 19 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { | 21 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { |
| 20 | c->x86_cache_alignment = c->x86_clflush_size * 2; | 22 | c->x86_cache_alignment = c->x86_clflush_size * 2; |
| 21 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
| 22 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 23 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
| 23 | } | 24 | } |
| 24 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | 25 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
| @@ -29,7 +30,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | |||
| 29 | .c_ident = { "CentaurHauls" }, | 30 | .c_ident = { "CentaurHauls" }, |
| 30 | .c_early_init = early_init_centaur, | 31 | .c_early_init = early_init_centaur, |
| 31 | .c_init = init_centaur, | 32 | .c_init = init_centaur, |
| 33 | .c_x86_vendor = X86_VENDOR_CENTAUR, | ||
| 32 | }; | 34 | }; |
| 33 | 35 | ||
| 34 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); | 36 | cpu_dev_register(centaur_cpu_dev); |
| 35 | 37 | ||
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/kernel/cpu/cmpxchg.c new file mode 100644 index 000000000000..2056ccf572cc --- /dev/null +++ b/arch/x86/kernel/cpu/cmpxchg.c | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * cmpxchg*() fallbacks for CPUs not supporting these instructions | ||
| 3 | */ | ||
| 4 | |||
| 5 | #include <linux/kernel.h> | ||
| 6 | #include <linux/smp.h> | ||
| 7 | #include <linux/module.h> | ||
| 8 | |||
| 9 | #ifndef CONFIG_X86_CMPXCHG | ||
| 10 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
| 11 | { | ||
| 12 | u8 prev; | ||
| 13 | unsigned long flags; | ||
| 14 | |||
| 15 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
| 16 | local_irq_save(flags); | ||
| 17 | prev = *(u8 *)ptr; | ||
| 18 | if (prev == old) | ||
| 19 | *(u8 *)ptr = new; | ||
| 20 | local_irq_restore(flags); | ||
| 21 | return prev; | ||
| 22 | } | ||
| 23 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
| 24 | |||
| 25 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
| 26 | { | ||
| 27 | u16 prev; | ||
| 28 | unsigned long flags; | ||
| 29 | |||
| 30 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
| 31 | local_irq_save(flags); | ||
| 32 | prev = *(u16 *)ptr; | ||
| 33 | if (prev == old) | ||
| 34 | *(u16 *)ptr = new; | ||
| 35 | local_irq_restore(flags); | ||
| 36 | return prev; | ||
| 37 | } | ||
| 38 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
| 39 | |||
| 40 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
| 41 | { | ||
| 42 | u32 prev; | ||
| 43 | unsigned long flags; | ||
| 44 | |||
| 45 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
| 46 | local_irq_save(flags); | ||
| 47 | prev = *(u32 *)ptr; | ||
| 48 | if (prev == old) | ||
| 49 | *(u32 *)ptr = new; | ||
| 50 | local_irq_restore(flags); | ||
| 51 | return prev; | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
| 54 | #endif | ||
| 55 | |||
| 56 | #ifndef CONFIG_X86_CMPXCHG64 | ||
| 57 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
| 58 | { | ||
| 59 | u64 prev; | ||
| 60 | unsigned long flags; | ||
| 61 | |||
| 62 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
| 63 | local_irq_save(flags); | ||
| 64 | prev = *(u64 *)ptr; | ||
| 65 | if (prev == old) | ||
| 66 | *(u64 *)ptr = new; | ||
| 67 | local_irq_restore(flags); | ||
| 68 | return prev; | ||
| 69 | } | ||
| 70 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
| 71 | #endif | ||
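As an aside, the contract these fallbacks implement is: store new only if the current value equals old, and always return the previous value; atomicity comes from disabling interrupts, which is only sound on UP. A user-space sketch of the same contract (not atomic, for illustration only):

	#include <stdio.h>

	typedef unsigned int u32;

	/* Same semantics as cmpxchg_386_u32(), minus the IRQ disabling,
	 * which has no direct user-space equivalent. */
	static u32 cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new_val)
	{
		u32 prev = *ptr;

		if (prev == old)
			*ptr = new_val;
		return prev;
	}

	int main(void)
	{
		u32 v = 7;

		printf("%u\n", cmpxchg_u32(&v, 7, 9));	/* prints 7; v becomes 9 */
		printf("%u\n", cmpxchg_u32(&v, 7, 5));	/* prints 9; v unchanged */
		return 0;
	}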
| 72 | |||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4e456bd955bb..7581b62df184 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -1,28 +1,62 @@ | |||
| 1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
| 2 | #include <linux/kernel.h> | ||
| 3 | #include <linux/sched.h> | ||
| 2 | #include <linux/string.h> | 4 | #include <linux/string.h> |
| 5 | #include <linux/bootmem.h> | ||
| 6 | #include <linux/bitops.h> | ||
| 7 | #include <linux/module.h> | ||
| 8 | #include <linux/kgdb.h> | ||
| 9 | #include <linux/topology.h> | ||
| 3 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
| 4 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
| 5 | #include <linux/module.h> | ||
| 6 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
| 7 | #include <linux/bootmem.h> | ||
| 8 | #include <asm/processor.h> | ||
| 9 | #include <asm/i387.h> | 13 | #include <asm/i387.h> |
| 10 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
| 11 | #include <asm/io.h> | 15 | #include <asm/io.h> |
| 16 | #include <asm/linkage.h> | ||
| 12 | #include <asm/mmu_context.h> | 17 | #include <asm/mmu_context.h> |
| 13 | #include <asm/mtrr.h> | 18 | #include <asm/mtrr.h> |
| 14 | #include <asm/mce.h> | 19 | #include <asm/mce.h> |
| 15 | #include <asm/pat.h> | 20 | #include <asm/pat.h> |
| 16 | #include <asm/asm.h> | 21 | #include <asm/asm.h> |
| 22 | #include <asm/numa.h> | ||
| 17 | #ifdef CONFIG_X86_LOCAL_APIC | 23 | #ifdef CONFIG_X86_LOCAL_APIC |
| 18 | #include <asm/mpspec.h> | 24 | #include <asm/mpspec.h> |
| 19 | #include <asm/apic.h> | 25 | #include <asm/apic.h> |
| 20 | #include <mach_apic.h> | 26 | #include <mach_apic.h> |
| 27 | #include <asm/genapic.h> | ||
| 21 | #endif | 28 | #endif |
| 22 | 29 | ||
| 30 | #include <asm/pda.h> | ||
| 31 | #include <asm/pgtable.h> | ||
| 32 | #include <asm/processor.h> | ||
| 33 | #include <asm/desc.h> | ||
| 34 | #include <asm/atomic.h> | ||
| 35 | #include <asm/proto.h> | ||
| 36 | #include <asm/sections.h> | ||
| 37 | #include <asm/setup.h> | ||
| 38 | |||
| 23 | #include "cpu.h" | 39 | #include "cpu.h" |
| 24 | 40 | ||
| 41 | static struct cpu_dev *this_cpu __cpuinitdata; | ||
| 42 | |||
| 43 | #ifdef CONFIG_X86_64 | ||
| 44 | /* We need valid kernel segments for data and code in long mode too; | ||
| 45 | * IRET will check the segment types. kkeil 2000/10/28 | ||
| 46 | * Also sysret mandates a special GDT layout | ||
| 47 | */ | ||
| 48 | /* The TLS descriptors are currently at a different place compared to i386. | ||
| 49 | Hopefully nobody expects them at a fixed place (Wine?) */ | ||
| 25 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | 50 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { |
| 51 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | ||
| 52 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | ||
| 53 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | ||
| 54 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | ||
| 55 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | ||
| 56 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | ||
| 57 | } }; | ||
| 58 | #else | ||
| 59 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | ||
| 26 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 60 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, |
| 27 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 61 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, |
| 28 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 62 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, |
| @@ -56,17 +90,150 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | |||
| 56 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, | 90 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, |
| 57 | [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, | 91 | [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, |
| 58 | } }; | 92 | } }; |
| 93 | #endif | ||
| 59 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | 94 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
| 60 | 95 | ||
| 61 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | 96 | #ifdef CONFIG_X86_32 |
| 62 | |||
| 63 | static int cachesize_override __cpuinitdata = -1; | 97 | static int cachesize_override __cpuinitdata = -1; |
| 64 | static int disable_x86_serial_nr __cpuinitdata = 1; | 98 | static int disable_x86_serial_nr __cpuinitdata = 1; |
| 65 | 99 | ||
| 66 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 100 | static int __init cachesize_setup(char *str) |
| 101 | { | ||
| 102 | get_option(&str, &cachesize_override); | ||
| 103 | return 1; | ||
| 104 | } | ||
| 105 | __setup("cachesize=", cachesize_setup); | ||
| 106 | |||
| 107 | static int __init x86_fxsr_setup(char *s) | ||
| 108 | { | ||
| 109 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | ||
| 110 | setup_clear_cpu_cap(X86_FEATURE_XMM); | ||
| 111 | return 1; | ||
| 112 | } | ||
| 113 | __setup("nofxsr", x86_fxsr_setup); | ||
| 114 | |||
| 115 | static int __init x86_sep_setup(char *s) | ||
| 116 | { | ||
| 117 | setup_clear_cpu_cap(X86_FEATURE_SEP); | ||
| 118 | return 1; | ||
| 119 | } | ||
| 120 | __setup("nosep", x86_sep_setup); | ||
| 121 | |||
| 122 | /* Standard macro to see if a specific flag is changeable */ | ||
| 123 | static inline int flag_is_changeable_p(u32 flag) | ||
| 124 | { | ||
| 125 | u32 f1, f2; | ||
| 126 | |||
| 127 | asm("pushfl\n\t" | ||
| 128 | "pushfl\n\t" | ||
| 129 | "popl %0\n\t" | ||
| 130 | "movl %0,%1\n\t" | ||
| 131 | "xorl %2,%0\n\t" | ||
| 132 | "pushl %0\n\t" | ||
| 133 | "popfl\n\t" | ||
| 134 | "pushfl\n\t" | ||
| 135 | "popl %0\n\t" | ||
| 136 | "popfl\n\t" | ||
| 137 | : "=&r" (f1), "=&r" (f2) | ||
| 138 | : "ir" (flag)); | ||
| 139 | |||
| 140 | return ((f1^f2) & flag) != 0; | ||
| 141 | } | ||
| 142 | |||
| 143 | /* Probe for the CPUID instruction */ | ||
| 144 | static int __cpuinit have_cpuid_p(void) | ||
| 145 | { | ||
| 146 | return flag_is_changeable_p(X86_EFLAGS_ID); | ||
| 147 | } | ||
| 148 | |||
| 149 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | ||
| 150 | { | ||
| 151 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { | ||
| 152 | /* Disable processor serial number */ | ||
| 153 | unsigned long lo, hi; | ||
| 154 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
| 155 | lo |= 0x200000; | ||
| 156 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
| 157 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | ||
| 158 | clear_cpu_cap(c, X86_FEATURE_PN); | ||
| 159 | |||
| 160 | /* Disabling the serial number may affect the cpuid level */ | ||
| 161 | c->cpuid_level = cpuid_eax(0); | ||
| 162 | } | ||
| 163 | } | ||
| 164 | |||
| 165 | static int __init x86_serial_nr_setup(char *s) | ||
| 166 | { | ||
| 167 | disable_x86_serial_nr = 0; | ||
| 168 | return 1; | ||
| 169 | } | ||
| 170 | __setup("serialnumber", x86_serial_nr_setup); | ||
| 171 | #else | ||
| 172 | static inline int flag_is_changeable_p(u32 flag) | ||
| 173 | { | ||
| 174 | return 1; | ||
| 175 | } | ||
| 176 | /* Probe for the CPUID instruction */ | ||
| 177 | static inline int have_cpuid_p(void) | ||
| 178 | { | ||
| 179 | return 1; | ||
| 180 | } | ||
| 181 | static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | ||
| 182 | { | ||
| 183 | } | ||
| 184 | #endif | ||
| 185 | |||
| 186 | /* | ||
| 187 | * Naming convention should be: <Name> [(<Codename>)] | ||
| 188 | * This table is only used if init_<vendor>() below doesn't set it; | ||
| 189 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
| 190 | * | ||
| 191 | */ | ||
| 192 | |||
| 193 | /* Look up CPU names by table lookup. */ | ||
| 194 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | ||
| 195 | { | ||
| 196 | struct cpu_model_info *info; | ||
| 197 | |||
| 198 | if (c->x86_model >= 16) | ||
| 199 | return NULL; /* Range check */ | ||
| 200 | |||
| 201 | if (!this_cpu) | ||
| 202 | return NULL; | ||
| 203 | |||
| 204 | info = this_cpu->c_models; | ||
| 205 | |||
| 206 | while (info && info->family) { | ||
| 207 | if (info->family == c->x86) | ||
| 208 | return info->model_names[c->x86_model]; | ||
| 209 | info++; | ||
| 210 | } | ||
| 211 | return NULL; /* Not found */ | ||
| 212 | } | ||
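For readers unfamiliar with the table being walked: c_models is assumed to be an array of cpu_model_info rows, one per family, each holding up to 16 model-name slots (hence the x86_model >= 16 range check). A toy sketch with invented entries:

	#include <stdio.h>

	struct cpu_model_info {
		int family;
		const char *model_names[16];
	};

	/* Hypothetical rows; a family of 0 terminates the walk. */
	static const struct cpu_model_info models[] = {
		{ 4, { [0] = "486 DX-25/33", [3] = "486 DX/2" } },
		{ 0 }
	};

	static const char *lookup_model(int family, int model)
	{
		const struct cpu_model_info *info = models;

		if (model >= 16)	/* same range check as table_lookup_model() */
			return NULL;
		for (; info->family; info++)
			if (info->family == family)
				return info->model_names[model];
		return NULL;
	}

	int main(void)
	{
		const char *name = lookup_model(4, 3);

		printf("%s\n", name ? name : "unknown");	/* prints 486 DX/2 */
		return 0;
	}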
| 213 | |||
| 214 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
| 215 | |||
| 216 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
| 217 | * it's on the real one. */ | ||
| 218 | void switch_to_new_gdt(void) | ||
| 219 | { | ||
| 220 | struct desc_ptr gdt_descr; | ||
| 221 | |||
| 222 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
| 223 | gdt_descr.size = GDT_SIZE - 1; | ||
| 224 | load_gdt(&gdt_descr); | ||
| 225 | #ifdef CONFIG_X86_32 | ||
| 226 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | ||
| 227 | #endif | ||
| 228 | } | ||
| 229 | |||
| 230 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | ||
| 67 | 231 | ||
| 68 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 232 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
| 69 | { | 233 | { |
| 234 | #ifdef CONFIG_X86_64 | ||
| 235 | display_cacheinfo(c); | ||
| 236 | #else | ||
| 70 | /* Not much we can do here... */ | 237 | /* Not much we can do here... */ |
| 71 | /* Check if at least it has cpuid */ | 238 | /* Check if at least it has cpuid */ |
| 72 | if (c->cpuid_level == -1) { | 239 | if (c->cpuid_level == -1) { |
| @@ -76,28 +243,22 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c) | |||
| 76 | else if (c->x86 == 3) | 243 | else if (c->x86 == 3) |
| 77 | strcpy(c->x86_model_id, "386"); | 244 | strcpy(c->x86_model_id, "386"); |
| 78 | } | 245 | } |
| 246 | #endif | ||
| 79 | } | 247 | } |
| 80 | 248 | ||
| 81 | static struct cpu_dev __cpuinitdata default_cpu = { | 249 | static struct cpu_dev __cpuinitdata default_cpu = { |
| 82 | .c_init = default_init, | 250 | .c_init = default_init, |
| 83 | .c_vendor = "Unknown", | 251 | .c_vendor = "Unknown", |
| 252 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | ||
| 84 | }; | 253 | }; |
| 85 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
| 86 | 254 | ||
| 87 | static int __init cachesize_setup(char *str) | 255 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) |
| 88 | { | ||
| 89 | get_option(&str, &cachesize_override); | ||
| 90 | return 1; | ||
| 91 | } | ||
| 92 | __setup("cachesize=", cachesize_setup); | ||
| 93 | |||
| 94 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | ||
| 95 | { | 256 | { |
| 96 | unsigned int *v; | 257 | unsigned int *v; |
| 97 | char *p, *q; | 258 | char *p, *q; |
| 98 | 259 | ||
| 99 | if (cpuid_eax(0x80000000) < 0x80000004) | 260 | if (c->extended_cpuid_level < 0x80000004) |
| 100 | return 0; | 261 | return; |
| 101 | 262 | ||
| 102 | v = (unsigned int *) c->x86_model_id; | 263 | v = (unsigned int *) c->x86_model_id; |
| 103 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | 264 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); |
| @@ -116,30 +277,34 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
| 116 | while (q <= &c->x86_model_id[48]) | 277 | while (q <= &c->x86_model_id[48]) |
| 117 | *q++ = '\0'; /* Zero-pad the rest */ | 278 | *q++ = '\0'; /* Zero-pad the rest */ |
| 118 | } | 279 | } |
| 119 | |||
| 120 | return 1; | ||
| 121 | } | 280 | } |
| 122 | 281 | ||
| 123 | |||
| 124 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | 282 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) |
| 125 | { | 283 | { |
| 126 | unsigned int n, dummy, ecx, edx, l2size; | 284 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
| 127 | 285 | ||
| 128 | n = cpuid_eax(0x80000000); | 286 | n = c->extended_cpuid_level; |
| 129 | 287 | ||
| 130 | if (n >= 0x80000005) { | 288 | if (n >= 0x80000005) { |
| 131 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); | 289 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
| 132 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", | 290 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", |
| 133 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | 291 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); |
| 134 | c->x86_cache_size = (ecx>>24)+(edx>>24); | 292 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
| 293 | #ifdef CONFIG_X86_64 | ||
| 294 | /* On K8 L1 TLB is inclusive, so don't count it */ | ||
| 295 | c->x86_tlbsize = 0; | ||
| 296 | #endif | ||
| 135 | } | 297 | } |
| 136 | 298 | ||
| 137 | if (n < 0x80000006) /* Some chips just have a large L1. */ | 299 |
| 138 | return; | 300 | return; |
| 139 | 301 | ||
| 140 | ecx = cpuid_ecx(0x80000006); | 302 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
| 141 | l2size = ecx >> 16; | 303 | l2size = ecx >> 16; |
| 142 | 304 | ||
| 305 | #ifdef CONFIG_X86_64 | ||
| 306 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
| 307 | #else | ||
| 143 | /* do processor-specific cache resizing */ | 308 | /* do processor-specific cache resizing */ |
| 144 | if (this_cpu->c_size_cache) | 309 | if (this_cpu->c_size_cache) |
| 145 | l2size = this_cpu->c_size_cache(c, l2size); | 310 | l2size = this_cpu->c_size_cache(c, l2size); |
| @@ -150,116 +315,106 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | |||
| 150 | 315 | ||
| 151 | if (l2size == 0) | 316 | if (l2size == 0) |
| 152 | return; /* Again, no L2 cache is possible */ | 317 | return; /* Again, no L2 cache is possible */ |
| 318 | #endif | ||
| 153 | 319 | ||
| 154 | c->x86_cache_size = l2size; | 320 | c->x86_cache_size = l2size; |
| 155 | 321 | ||
| 156 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | 322 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", |
| 157 | l2size, ecx & 0xFF); | 323 | l2size, ecx & 0xFF); |
| 158 | } | 324 | } |
| 159 | 325 | ||
| 160 | /* | 326 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
| 161 | * Naming convention should be: <Name> [(<Codename>)] | ||
| 162 | * This table only is used unless init_<vendor>() below doesn't set it; | ||
| 163 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
| 164 | * | ||
| 165 | */ | ||
| 166 | |||
| 167 | /* Look up CPU names by table lookup. */ | ||
| 168 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | ||
| 169 | { | 327 | { |
| 170 | struct cpu_model_info *info; | 328 | #ifdef CONFIG_X86_HT |
| 329 | u32 eax, ebx, ecx, edx; | ||
| 330 | int index_msb, core_bits; | ||
| 171 | 331 | ||
| 172 | if (c->x86_model >= 16) | 332 | if (!cpu_has(c, X86_FEATURE_HT)) |
| 173 | return NULL; /* Range check */ | 333 | return; |
| 174 | 334 | ||
| 175 | if (!this_cpu) | 335 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
| 176 | return NULL; | 336 | goto out; |
| 177 | 337 | ||
| 178 | info = this_cpu->c_models; | 338 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) |
| 339 | return; | ||
| 179 | 340 | ||
| 180 | while (info && info->family) { | 341 | cpuid(1, &eax, &ebx, &ecx, &edx); |
| 181 | if (info->family == c->x86) | 342 | |
| 182 | return info->model_names[c->x86_model]; | 343 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
| 183 | info++; | 344 | |
| 345 | if (smp_num_siblings == 1) { | ||
| 346 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | ||
| 347 | } else if (smp_num_siblings > 1) { | ||
| 348 | |||
| 349 | if (smp_num_siblings > NR_CPUS) { | ||
| 350 | printk(KERN_WARNING "CPU: Unsupported number of siblings %d", | ||
| 351 | smp_num_siblings); | ||
| 352 | smp_num_siblings = 1; | ||
| 353 | return; | ||
| 354 | } | ||
| 355 | |||
| 356 | index_msb = get_count_order(smp_num_siblings); | ||
| 357 | #ifdef CONFIG_X86_64 | ||
| 358 | c->phys_proc_id = phys_pkg_id(index_msb); | ||
| 359 | #else | ||
| 360 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | ||
| 361 | #endif | ||
| 362 | |||
| 363 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
| 364 | |||
| 365 | index_msb = get_count_order(smp_num_siblings); | ||
| 366 | |||
| 367 | core_bits = get_count_order(c->x86_max_cores); | ||
| 368 | |||
| 369 | #ifdef CONFIG_X86_64 | ||
| 370 | c->cpu_core_id = phys_pkg_id(index_msb) & | ||
| 371 | ((1 << core_bits) - 1); | ||
| 372 | #else | ||
| 373 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | ||
| 374 | ((1 << core_bits) - 1); | ||
| 375 | #endif | ||
| 184 | } | 376 | } |
| 185 | return NULL; /* Not found */ | ||
| 186 | } | ||
| 187 | 377 | ||
| 378 | out: | ||
| 379 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | ||
| 380 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
| 381 | c->phys_proc_id); | ||
| 382 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
| 383 | c->cpu_core_id); | ||
| 384 | } | ||
| 385 | #endif | ||
| 386 | } | ||
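The sibling arithmetic in detect_ht() is easier to check with numbers. Assume CPUID.1:EBX[23:16] reports 4 logical processors per package and x86_max_cores is 2; get_count_order() then yields the shift widths used to peel the package and core IDs out of the APIC ID:

	#include <stdio.h>

	/* get_count_order(n): smallest b with (1 << b) >= n */
	static int count_order(unsigned n)
	{
		int b = 0;

		while ((1u << b) < n)
			b++;
		return b;
	}

	int main(void)
	{
		unsigned apicid = 0x7;	/* hypothetical: pkg 1, core 1, thread 1 */
		unsigned siblings = 4;	/* logical CPUs per package */
		unsigned max_cores = 2;	/* cores per package */

		int pkg_shift = count_order(siblings);			/* 2 */
		unsigned phys_proc_id = apicid >> pkg_shift;		/* 1 */

		int thr_shift = count_order(siblings / max_cores);	/* 1 */
		int core_bits = count_order(max_cores);			/* 1 */
		unsigned core_id = (apicid >> thr_shift) & ((1 << core_bits) - 1);

		printf("pkg %u core %u\n", phys_proc_id, core_id); /* pkg 1 core 1 */
		return 0;
	}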
| 188 | 387 | ||
| 189 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | 388 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
| 190 | { | 389 | { |
| 191 | char *v = c->x86_vendor_id; | 390 | char *v = c->x86_vendor_id; |
| 192 | int i; | 391 | int i; |
| 193 | static int printed; | 392 | static int printed; |
| 194 | 393 | ||
| 195 | for (i = 0; i < X86_VENDOR_NUM; i++) { | 394 | for (i = 0; i < X86_VENDOR_NUM; i++) { |
| 196 | if (cpu_devs[i]) { | 395 | if (!cpu_devs[i]) |
| 197 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | 396 | break; |
| 198 | (cpu_devs[i]->c_ident[1] && | 397 | |
| 199 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | 398 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
| 200 | c->x86_vendor = i; | 399 | (cpu_devs[i]->c_ident[1] && |
| 201 | if (!early) | 400 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
| 202 | this_cpu = cpu_devs[i]; | 401 | this_cpu = cpu_devs[i]; |
| 203 | return; | 402 | c->x86_vendor = this_cpu->c_x86_vendor; |
| 204 | } | 403 | return; |
| 205 | } | 404 | } |
| 206 | } | 405 | } |
| 406 | |||
| 207 | if (!printed) { | 407 | if (!printed) { |
| 208 | printed++; | 408 | printed++; |
| 209 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | 409 | printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v); |
| 210 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | 410 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); |
| 211 | } | 411 | } |
| 412 | |||
| 212 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 413 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
| 213 | this_cpu = &default_cpu; | 414 | this_cpu = &default_cpu; |
| 214 | } | 415 | } |
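Not part of the patch: the vendor match above compares the 12-byte CPUID leaf-0 string against each registered cpu_dev's c_ident entries, falling back to default_cpu when nothing matches. A toy version with a hypothetical table:

	#include <stdio.h>
	#include <string.h>

	struct vendor { const char *ident[2]; const char *name; };

	/* Hypothetical registry standing in for cpu_devs[] */
	static const struct vendor vendors[] = {
		{ { "GenuineIntel", NULL }, "Intel" },
		{ { "AuthenticAMD", NULL }, "AMD" },
		{ { "CentaurHauls", NULL }, "Centaur" },
	};

	static const char *lookup(const char *v)
	{
		size_t i, j;

		for (i = 0; i < sizeof(vendors) / sizeof(vendors[0]); i++)
			for (j = 0; j < 2; j++)
				if (vendors[i].ident[j] &&
				    !strcmp(v, vendors[i].ident[j]))
					return vendors[i].name;
		return "Unknown";	/* the default_cpu fallback */
	}

	int main(void)
	{
		printf("%s\n", lookup("AuthenticAMD"));	/* prints AMD */
		return 0;
	}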
| 215 | 416 | ||
| 216 | 417 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |
| 217 | static int __init x86_fxsr_setup(char *s) | ||
| 218 | { | ||
| 219 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | ||
| 220 | setup_clear_cpu_cap(X86_FEATURE_XMM); | ||
| 221 | return 1; | ||
| 222 | } | ||
| 223 | __setup("nofxsr", x86_fxsr_setup); | ||
| 224 | |||
| 225 | |||
| 226 | static int __init x86_sep_setup(char *s) | ||
| 227 | { | ||
| 228 | setup_clear_cpu_cap(X86_FEATURE_SEP); | ||
| 229 | return 1; | ||
| 230 | } | ||
| 231 | __setup("nosep", x86_sep_setup); | ||
| 232 | |||
| 233 | |||
| 234 | /* Standard macro to see if a specific flag is changeable */ | ||
| 235 | static inline int flag_is_changeable_p(u32 flag) | ||
| 236 | { | ||
| 237 | u32 f1, f2; | ||
| 238 | |||
| 239 | asm("pushfl\n\t" | ||
| 240 | "pushfl\n\t" | ||
| 241 | "popl %0\n\t" | ||
| 242 | "movl %0,%1\n\t" | ||
| 243 | "xorl %2,%0\n\t" | ||
| 244 | "pushl %0\n\t" | ||
| 245 | "popfl\n\t" | ||
| 246 | "pushfl\n\t" | ||
| 247 | "popl %0\n\t" | ||
| 248 | "popfl\n\t" | ||
| 249 | : "=&r" (f1), "=&r" (f2) | ||
| 250 | : "ir" (flag)); | ||
| 251 | |||
| 252 | return ((f1^f2) & flag) != 0; | ||
| 253 | } | ||
| 254 | |||
| 255 | |||
| 256 | /* Probe for the CPUID instruction */ | ||
| 257 | static int __cpuinit have_cpuid_p(void) | ||
| 258 | { | ||
| 259 | return flag_is_changeable_p(X86_EFLAGS_ID); | ||
| 260 | } | ||
| 261 | |||
| 262 | void __init cpu_detect(struct cpuinfo_x86 *c) | ||
| 263 | { | 418 | { |
| 264 | /* Get vendor name */ | 419 | /* Get vendor name */ |
| 265 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 420 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
| @@ -268,50 +423,87 @@ void __init cpu_detect(struct cpuinfo_x86 *c) | |||
| 268 | (unsigned int *)&c->x86_vendor_id[4]); | 423 | (unsigned int *)&c->x86_vendor_id[4]); |
| 269 | 424 | ||
| 270 | c->x86 = 4; | 425 | c->x86 = 4; |
| 426 | /* Intel-defined flags: level 0x00000001 */ | ||
| 271 | if (c->cpuid_level >= 0x00000001) { | 427 | if (c->cpuid_level >= 0x00000001) { |
| 272 | u32 junk, tfms, cap0, misc; | 428 | u32 junk, tfms, cap0, misc; |
| 273 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); | 429 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
| 274 | c->x86 = (tfms >> 8) & 15; | 430 | c->x86 = (tfms >> 8) & 0xf; |
| 275 | c->x86_model = (tfms >> 4) & 15; | 431 | c->x86_model = (tfms >> 4) & 0xf; |
| 432 | c->x86_mask = tfms & 0xf; | ||
| 276 | if (c->x86 == 0xf) | 433 | if (c->x86 == 0xf) |
| 277 | c->x86 += (tfms >> 20) & 0xff; | 434 | c->x86 += (tfms >> 20) & 0xff; |
| 278 | if (c->x86 >= 0x6) | 435 | if (c->x86 >= 0x6) |
| 279 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 436 | c->x86_model += ((tfms >> 16) & 0xf) << 4; |
| 280 | c->x86_mask = tfms & 15; | ||
| 281 | if (cap0 & (1<<19)) { | 437 | if (cap0 & (1<<19)) { |
| 282 | c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; | ||
| 283 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | 438 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
| 439 | c->x86_cache_alignment = c->x86_clflush_size; | ||
| 284 | } | 440 | } |
| 285 | } | 441 | } |
| 286 | } | 442 | } |
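A worked decode of the family/model/stepping logic above, using an assumed CPUID.1:EAX signature:

	#include <stdio.h>

	int main(void)
	{
		unsigned tfms = 0x00100f42;	/* hypothetical signature */

		unsigned family = (tfms >> 8) & 0xf;	/* 0xf */
		unsigned model = (tfms >> 4) & 0xf;	/* 0x4 */
		unsigned stepping = tfms & 0xf;		/* 0x2 */

		if (family == 0xf)			/* extended family applies */
			family += (tfms >> 20) & 0xff;	/* 0xf + 0x1 = 0x10 */
		if (family >= 0x6)			/* extended model applies */
			model += ((tfms >> 16) & 0xf) << 4;	/* + 0, stays 0x4 */

		/* prints: family 0x10 model 0x4 stepping 0x2 */
		printf("family %#x model %#x stepping %#x\n",
		       family, model, stepping);
		return 0;
	}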
| 287 | static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | 443 | |
| 444 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | ||
| 288 | { | 445 | { |
| 289 | u32 tfms, xlvl; | 446 | u32 tfms, xlvl; |
| 290 | unsigned int ebx; | 447 | u32 ebx; |
| 291 | 448 | ||
| 292 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 449 | /* Intel-defined flags: level 0x00000001 */ |
| 293 | if (have_cpuid_p()) { | 450 | if (c->cpuid_level >= 0x00000001) { |
| 294 | /* Intel-defined flags: level 0x00000001 */ | 451 | u32 capability, excap; |
| 295 | if (c->cpuid_level >= 0x00000001) { | 452 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
| 296 | u32 capability, excap; | 453 | c->x86_capability[0] = capability; |
| 297 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 454 | c->x86_capability[4] = excap; |
| 298 | c->x86_capability[0] = capability; | 455 | } |
| 299 | c->x86_capability[4] = excap; | ||
| 300 | } | ||
| 301 | 456 | ||
| 302 | /* AMD-defined flags: level 0x80000001 */ | 457 | /* AMD-defined flags: level 0x80000001 */ |
| 303 | xlvl = cpuid_eax(0x80000000); | 458 | xlvl = cpuid_eax(0x80000000); |
| 304 | if ((xlvl & 0xffff0000) == 0x80000000) { | 459 | c->extended_cpuid_level = xlvl; |
| 305 | if (xlvl >= 0x80000001) { | 460 | if ((xlvl & 0xffff0000) == 0x80000000) { |
| 306 | c->x86_capability[1] = cpuid_edx(0x80000001); | 461 | if (xlvl >= 0x80000001) { |
| 307 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 462 | c->x86_capability[1] = cpuid_edx(0x80000001); |
| 308 | } | 463 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
| 309 | } | 464 | } |
| 465 | } | ||
| 310 | 466 | ||
| 467 | #ifdef CONFIG_X86_64 | ||
| 468 | if (c->extended_cpuid_level >= 0x80000008) { | ||
| 469 | u32 eax = cpuid_eax(0x80000008); | ||
| 470 | |||
| 471 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
| 472 | c->x86_phys_bits = eax & 0xff; | ||
| 311 | } | 473 | } |
| 474 | #endif | ||
| 475 | |||
| 476 | if (c->extended_cpuid_level >= 0x80000007) | ||
| 477 | c->x86_power = cpuid_edx(0x80000007); | ||
| 312 | 478 | ||
| 313 | } | 479 | } |
| 314 | 480 | ||
| 481 | static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | ||
| 482 | { | ||
| 483 | #ifdef CONFIG_X86_32 | ||
| 484 | int i; | ||
| 485 | |||
| 486 | /* | ||
| 487 | * First of all, decide if this is a 486 or higher | ||
| 488 | * It's a 486 if we can modify the AC flag | ||
| 489 | */ | ||
| 490 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | ||
| 491 | c->x86 = 4; | ||
| 492 | else | ||
| 493 | c->x86 = 3; | ||
| 494 | |||
| 495 | for (i = 0; i < X86_VENDOR_NUM; i++) | ||
| 496 | if (cpu_devs[i] && cpu_devs[i]->c_identify) { | ||
| 497 | c->x86_vendor_id[0] = 0; | ||
| 498 | cpu_devs[i]->c_identify(c); | ||
| 499 | if (c->x86_vendor_id[0]) { | ||
| 500 | get_cpu_vendor(c); | ||
| 501 | break; | ||
| 502 | } | ||
| 503 | } | ||
| 504 | #endif | ||
| 505 | } | ||
| 506 | |||
| 315 | /* | 507 | /* |
| 316 | * Do minimum CPU detection early. | 508 | * Do minimum CPU detection early. |
| 317 | * Fields really needed: vendor, cpuid_level, family, model, mask, | 509 | * Fields really needed: vendor, cpuid_level, family, model, mask, |
| @@ -321,25 +513,61 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | |||
| 321 | * WARNING: this function is only called on the BP. Don't add code here | 513 | * WARNING: this function is only called on the BP. Don't add code here |
| 322 | * that is supposed to run on all CPUs. | 514 | * that is supposed to run on all CPUs. |
| 323 | */ | 515 | */ |
| 324 | static void __init early_cpu_detect(void) | 516 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
| 325 | { | 517 | { |
| 326 | struct cpuinfo_x86 *c = &boot_cpu_data; | 518 | #ifdef CONFIG_X86_64 |
| 327 | 519 | c->x86_clflush_size = 64; | |
| 328 | c->x86_cache_alignment = 32; | 520 | #else |
| 329 | c->x86_clflush_size = 32; | 521 | c->x86_clflush_size = 32; |
| 522 | #endif | ||
| 523 | c->x86_cache_alignment = c->x86_clflush_size; | ||
| 524 | |||
| 525 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
| 526 | c->extended_cpuid_level = 0; | ||
| 330 | 527 | ||
| 331 | if (!have_cpuid_p()) | 528 | if (!have_cpuid_p()) |
| 529 | identify_cpu_without_cpuid(c); | ||
| 530 | |||
| 531 | /* cyrix could have cpuid enabled via c_identify()*/ | ||
| 532 | if (!have_cpuid_p()) | ||
| 332 | return; | 533 | return; |
| 333 | 534 | ||
| 334 | cpu_detect(c); | 535 | cpu_detect(c); |
| 335 | 536 | ||
| 336 | get_cpu_vendor(c, 1); | 537 | get_cpu_vendor(c); |
| 337 | 538 | ||
| 338 | early_get_cap(c); | 539 | get_cpu_cap(c); |
| 339 | 540 | ||
| 340 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 541 | if (this_cpu->c_early_init) |
| 341 | cpu_devs[c->x86_vendor]->c_early_init) | 542 | this_cpu->c_early_init(c); |
| 342 | cpu_devs[c->x86_vendor]->c_early_init(c); | 543 | |
| 544 | validate_pat_support(c); | ||
| 545 | } | ||
| 546 | |||
| 547 | void __init early_cpu_init(void) | ||
| 548 | { | ||
| 549 | struct cpu_dev **cdev; | ||
| 550 | int count = 0; | ||
| 551 | |||
| 552 | printk("KERNEL supported cpus:\n"); | ||
| 553 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | ||
| 554 | struct cpu_dev *cpudev = *cdev; | ||
| 555 | unsigned int j; | ||
| 556 | |||
| 557 | if (count >= X86_VENDOR_NUM) | ||
| 558 | break; | ||
| 559 | cpu_devs[count] = cpudev; | ||
| 560 | count++; | ||
| 561 | |||
| 562 | for (j = 0; j < 2; j++) { | ||
| 563 | if (!cpudev->c_ident[j]) | ||
| 564 | continue; | ||
| 565 | printk(" %s %s\n", cpudev->c_vendor, | ||
| 566 | cpudev->c_ident[j]); | ||
| 567 | } | ||
| 568 | } | ||
| 569 | |||
| 570 | early_identify_cpu(&boot_cpu_data); | ||
| 343 | } | 571 | } |
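Worth noting: early_cpu_init() now discovers vendors by walking a linker section bounded by __x86_cpu_dev_start/__x86_cpu_dev_end, rather than through explicit per-vendor registration calls. A reduced sketch of the same pattern (section and symbol names here are illustrative; it relies on GNU ld emitting __start_/__stop_ symbols for identifier-named sections):

	#include <stdio.h>

	struct cpu_dev { const char *c_vendor; };

	/* Each "driver" drops a pointer into a named section... */
	#define cpu_dev_register(dev)					\
		static const struct cpu_dev *__ptr_##dev		\
		__attribute__((used, section("x86_cpu_dev"))) = &dev

	static const struct cpu_dev amd_dev = { "AMD" };
	cpu_dev_register(amd_dev);

	/* ...and the core walks the section between linker-provided bounds. */
	extern const struct cpu_dev *__start_x86_cpu_dev[];
	extern const struct cpu_dev *__stop_x86_cpu_dev[];

	int main(void)
	{
		const struct cpu_dev **p;

		for (p = __start_x86_cpu_dev; p < __stop_x86_cpu_dev; p++)
			printf("supported: %s\n", (*p)->c_vendor);
		return 0;
	}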
| 344 | 572 | ||
| 345 | /* | 573 | /* |
| @@ -357,86 +585,41 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | |||
| 357 | 585 | ||
| 358 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 586 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
| 359 | { | 587 | { |
| 360 | u32 tfms, xlvl; | 588 | c->extended_cpuid_level = 0; |
| 361 | unsigned int ebx; | ||
| 362 | |||
| 363 | if (have_cpuid_p()) { | ||
| 364 | /* Get vendor name */ | ||
| 365 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | ||
| 366 | (unsigned int *)&c->x86_vendor_id[0], | ||
| 367 | (unsigned int *)&c->x86_vendor_id[8], | ||
| 368 | (unsigned int *)&c->x86_vendor_id[4]); | ||
| 369 | |||
| 370 | get_cpu_vendor(c, 0); | ||
| 371 | /* Initialize the standard set of capabilities */ | ||
| 372 | /* Note that the vendor-specific code below might override */ | ||
| 373 | /* Intel-defined flags: level 0x00000001 */ | ||
| 374 | if (c->cpuid_level >= 0x00000001) { | ||
| 375 | u32 capability, excap; | ||
| 376 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | ||
| 377 | c->x86_capability[0] = capability; | ||
| 378 | c->x86_capability[4] = excap; | ||
| 379 | c->x86 = (tfms >> 8) & 15; | ||
| 380 | c->x86_model = (tfms >> 4) & 15; | ||
| 381 | if (c->x86 == 0xf) | ||
| 382 | c->x86 += (tfms >> 20) & 0xff; | ||
| 383 | if (c->x86 >= 0x6) | ||
| 384 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
| 385 | c->x86_mask = tfms & 15; | ||
| 386 | c->initial_apicid = (ebx >> 24) & 0xFF; | ||
| 387 | #ifdef CONFIG_X86_HT | ||
| 388 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | ||
| 389 | c->phys_proc_id = c->initial_apicid; | ||
| 390 | #else | ||
| 391 | c->apicid = c->initial_apicid; | ||
| 392 | #endif | ||
| 393 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | ||
| 394 | c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; | ||
| 395 | } else { | ||
| 396 | /* Have CPUID level 0 only - unheard of */ | ||
| 397 | c->x86 = 4; | ||
| 398 | } | ||
| 399 | 589 | ||
| 400 | /* AMD-defined flags: level 0x80000001 */ | 590 | if (!have_cpuid_p()) |
| 401 | xlvl = cpuid_eax(0x80000000); | 591 | identify_cpu_without_cpuid(c); |
| 402 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
| 403 | if (xlvl >= 0x80000001) { | ||
| 404 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
| 405 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
| 406 | } | ||
| 407 | if (xlvl >= 0x80000004) | ||
| 408 | get_model_name(c); /* Default name */ | ||
| 409 | } | ||
| 410 | 592 | ||
| 411 | init_scattered_cpuid_features(c); | 593 | /* cyrix could have cpuid enabled via c_identify()*/ |
| 412 | detect_nopl(c); | 594 | if (!have_cpuid_p()) |
| 413 | } | 595 | return; |
| 414 | } | ||
| 415 | 596 | ||
| 416 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 597 | cpu_detect(c); |
| 417 | { | ||
| 418 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { | ||
| 419 | /* Disable processor serial number */ | ||
| 420 | unsigned long lo, hi; | ||
| 421 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
| 422 | lo |= 0x200000; | ||
| 423 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
| 424 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | ||
| 425 | clear_cpu_cap(c, X86_FEATURE_PN); | ||
| 426 | 598 | ||
| 427 | /* Disabling the serial number may affect the cpuid level */ | 599 | get_cpu_vendor(c); |
| 428 | c->cpuid_level = cpuid_eax(0); | ||
| 429 | } | ||
| 430 | } | ||
| 431 | 600 | ||
| 432 | static int __init x86_serial_nr_setup(char *s) | 601 | get_cpu_cap(c); |
| 433 | { | ||
| 434 | disable_x86_serial_nr = 0; | ||
| 435 | return 1; | ||
| 436 | } | ||
| 437 | __setup("serialnumber", x86_serial_nr_setup); | ||
| 438 | 602 | ||
| 603 | if (c->cpuid_level >= 0x00000001) { | ||
| 604 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; | ||
| 605 | #ifdef CONFIG_X86_32 | ||
| 606 | # ifdef CONFIG_X86_HT | ||
| 607 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | ||
| 608 | # else | ||
| 609 | c->apicid = c->initial_apicid; | ||
| 610 | # endif | ||
| 611 | #endif | ||
| 439 | 612 | ||
| 613 | #ifdef CONFIG_X86_HT | ||
| 614 | c->phys_proc_id = c->initial_apicid; | ||
| 615 | #endif | ||
| 616 | } | ||
| 617 | |||
| 618 | get_model_name(c); /* Default name */ | ||
| 619 | |||
| 620 | init_scattered_cpuid_features(c); | ||
| 621 | detect_nopl(c); | ||
| 622 | } | ||
| 440 | 623 | ||
| 441 | /* | 624 | /* |
| 442 | * This does the hard work of actually picking apart the CPU stuff... | 625 | * This does the hard work of actually picking apart the CPU stuff... |
| @@ -448,30 +631,29 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 448 | c->loops_per_jiffy = loops_per_jiffy; | 631 | c->loops_per_jiffy = loops_per_jiffy; |
| 449 | c->x86_cache_size = -1; | 632 | c->x86_cache_size = -1; |
| 450 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 633 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
| 451 | c->cpuid_level = -1; /* CPUID not detected */ | ||
| 452 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | 634 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ |
| 453 | c->x86_vendor_id[0] = '\0'; /* Unset */ | 635 | c->x86_vendor_id[0] = '\0'; /* Unset */ |
| 454 | c->x86_model_id[0] = '\0'; /* Unset */ | 636 | c->x86_model_id[0] = '\0'; /* Unset */ |
| 455 | c->x86_max_cores = 1; | 637 | c->x86_max_cores = 1; |
| 638 | c->x86_coreid_bits = 0; | ||
| 639 | #ifdef CONFIG_X86_64 | ||
| 640 | c->x86_clflush_size = 64; | ||
| 641 | #else | ||
| 642 | c->cpuid_level = -1; /* CPUID not detected */ | ||
| 456 | c->x86_clflush_size = 32; | 643 | c->x86_clflush_size = 32; |
| 644 | #endif | ||
| 645 | c->x86_cache_alignment = c->x86_clflush_size; | ||
| 457 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 646 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
| 458 | 647 | ||
| 459 | if (!have_cpuid_p()) { | ||
| 460 | /* | ||
| 461 | * First of all, decide if this is a 486 or higher | ||
| 462 | * It's a 486 if we can modify the AC flag | ||
| 463 | */ | ||
| 464 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | ||
| 465 | c->x86 = 4; | ||
| 466 | else | ||
| 467 | c->x86 = 3; | ||
| 468 | } | ||
| 469 | |||
| 470 | generic_identify(c); | 648 | generic_identify(c); |
| 471 | 649 | ||
| 472 | if (this_cpu->c_identify) | 650 | if (this_cpu->c_identify) |
| 473 | this_cpu->c_identify(c); | 651 | this_cpu->c_identify(c); |
| 474 | 652 | ||
| 653 | #ifdef CONFIG_X86_64 | ||
| 654 | c->apicid = phys_pkg_id(0); | ||
| 655 | #endif | ||
| 656 | |||
| 475 | /* | 657 | /* |
| 476 | * Vendor-specific initialization. In this section we | 658 | * Vendor-specific initialization. In this section we |
| 477 | * canonicalize the feature flags, meaning if there are | 659 | * canonicalize the feature flags, meaning if there are |
| @@ -505,6 +687,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 505 | c->x86, c->x86_model); | 687 | c->x86, c->x86_model); |
| 506 | } | 688 | } |
| 507 | 689 | ||
| 690 | #ifdef CONFIG_X86_64 | ||
| 691 | detect_ht(c); | ||
| 692 | #endif | ||
| 693 | |||
| 508 | /* | 694 | /* |
| 509 | * On SMP, boot_cpu_data holds the common feature set between | 695 | * On SMP, boot_cpu_data holds the common feature set between |
| 510 | * all CPUs; so make sure that we indicate which features are | 696 | * all CPUs; so make sure that we indicate which features are |
| @@ -513,7 +699,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 513 | */ | 699 | */ |
| 514 | if (c != &boot_cpu_data) { | 700 | if (c != &boot_cpu_data) { |
| 515 | /* AND the already accumulated flags with these */ | 701 | /* AND the already accumulated flags with these */ |
| 516 | for (i = 0 ; i < NCAPINTS ; i++) | 702 | for (i = 0; i < NCAPINTS; i++) |
| 517 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 703 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
| 518 | } | 704 | } |
| 519 | 705 | ||
| @@ -521,72 +707,79 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 521 | for (i = 0; i < NCAPINTS; i++) | 707 | for (i = 0; i < NCAPINTS; i++) |
| 522 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; | 708 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; |
| 523 | 709 | ||
| 710 | #ifdef CONFIG_X86_MCE | ||
| 524 | /* Init Machine Check Exception if available. */ | 711 | /* Init Machine Check Exception if available. */ |
| 525 | mcheck_init(c); | 712 | mcheck_init(c); |
| 713 | #endif | ||
| 526 | 714 | ||
| 527 | select_idle_routine(c); | 715 | select_idle_routine(c); |
| 716 | |||
| 717 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | ||
| 718 | numa_add_cpu(smp_processor_id()); | ||
| 719 | #endif | ||
| 528 | } | 720 | } |
| 529 | 721 | ||
| 530 | void __init identify_boot_cpu(void) | 722 | void __init identify_boot_cpu(void) |
| 531 | { | 723 | { |
| 532 | identify_cpu(&boot_cpu_data); | 724 | identify_cpu(&boot_cpu_data); |
| 725 | #ifdef CONFIG_X86_32 | ||
| 533 | sysenter_setup(); | 726 | sysenter_setup(); |
| 534 | enable_sep_cpu(); | 727 | enable_sep_cpu(); |
| 728 | #endif | ||
| 535 | } | 729 | } |
| 536 | 730 | ||
| 537 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 731 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) |
| 538 | { | 732 | { |
| 539 | BUG_ON(c == &boot_cpu_data); | 733 | BUG_ON(c == &boot_cpu_data); |
| 540 | identify_cpu(c); | 734 | identify_cpu(c); |
| 735 | #ifdef CONFIG_X86_32 | ||
| 541 | enable_sep_cpu(); | 736 | enable_sep_cpu(); |
| 737 | #endif | ||
| 542 | mtrr_ap_init(); | 738 | mtrr_ap_init(); |
| 543 | } | 739 | } |
| 544 | 740 | ||
| 545 | #ifdef CONFIG_X86_HT | 741 | struct msr_range { |
| 546 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 742 | unsigned min; |
| 547 | { | 743 | unsigned max; |
| 548 | u32 eax, ebx, ecx, edx; | 744 | }; |
| 549 | int index_msb, core_bits; | ||
| 550 | |||
| 551 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
| 552 | |||
| 553 | if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) | ||
| 554 | return; | ||
| 555 | |||
| 556 | smp_num_siblings = (ebx & 0xff0000) >> 16; | ||
| 557 | 745 | ||
| 558 | if (smp_num_siblings == 1) { | 746 | static struct msr_range msr_range_array[] __cpuinitdata = { |
| 559 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | 747 | { 0x00000000, 0x00000418}, |
| 560 | } else if (smp_num_siblings > 1) { | 748 | { 0xc0000000, 0xc000040b}, |
| 749 | { 0xc0010000, 0xc0010142}, | ||
| 750 | { 0xc0011000, 0xc001103b}, | ||
| 751 | }; | ||
| 561 | 752 | ||
| 562 | if (smp_num_siblings > NR_CPUS) { | 753 | static void __cpuinit print_cpu_msr(void) |
| 563 | printk(KERN_WARNING "CPU: Unsupported number of the " | 754 | { |
| 564 | "siblings %d", smp_num_siblings); | 755 | unsigned index; |
| 565 | smp_num_siblings = 1; | 756 | u64 val; |
| 566 | return; | 757 | int i; |
| 758 | unsigned index_min, index_max; | ||
| 759 | |||
| 760 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | ||
| 761 | index_min = msr_range_array[i].min; | ||
| 762 | index_max = msr_range_array[i].max; | ||
| 763 | for (index = index_min; index < index_max; index++) { | ||
| 764 | if (rdmsrl_amd_safe(index, &val)) | ||
| 765 | continue; | ||
| 766 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | ||
| 567 | } | 767 | } |
| 768 | } | ||
| 769 | } | ||
| 568 | 770 | ||
| 569 | index_msb = get_count_order(smp_num_siblings); | 771 | static int show_msr __cpuinitdata; |
| 570 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | 772 | static __init int setup_show_msr(char *arg) |
| 571 | 773 | { | |
| 572 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 774 | int num; |
| 573 | c->phys_proc_id); | ||
| 574 | |||
| 575 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
| 576 | |||
| 577 | index_msb = get_count_order(smp_num_siblings) ; | ||
| 578 | 775 | ||
| 579 | core_bits = get_count_order(c->x86_max_cores); | 776 | get_option(&arg, &num); |
| 580 | 777 | ||
| 581 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | 778 | if (num > 0) |
| 582 | ((1 << core_bits) - 1); | 779 | show_msr = num; |
| 583 | 780 | return 1; | |
| 584 | if (c->x86_max_cores > 1) | ||
| 585 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
| 586 | c->cpu_core_id); | ||
| 587 | } | ||
| 588 | } | 781 | } |
| 589 | #endif | 782 | __setup("show_msr=", setup_show_msr); |
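Usage note (inferred from the code above, not stated in the patch): on SMP kernels, booting with show_msr=1 should make print_cpu_msr() dump the four MSR ranges for CPU 0 only, since print_cpu_info() compares cpu_index against show_msr; rdmsrl_amd_safe() skips any index that faults, so unimplemented MSRs are silently omitted.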
| 590 | 783 | ||
| 591 | static __init int setup_noclflush(char *arg) | 784 | static __init int setup_noclflush(char *arg) |
| 592 | { | 785 | { |
| @@ -605,17 +798,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
| 605 | vendor = c->x86_vendor_id; | 798 | vendor = c->x86_vendor_id; |
| 606 | 799 | ||
| 607 | if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) | 800 | if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) |
| 608 | printk("%s ", vendor); | 801 | printk(KERN_CONT "%s ", vendor); |
| 609 | 802 | ||
| 610 | if (!c->x86_model_id[0]) | 803 | if (c->x86_model_id[0]) |
| 611 | printk("%d86", c->x86); | 804 | printk(KERN_CONT "%s", c->x86_model_id); |
| 612 | else | 805 | else |
| 613 | printk("%s", c->x86_model_id); | 806 | printk(KERN_CONT "%d86", c->x86); |
| 614 | 807 | ||
| 615 | if (c->x86_mask || c->cpuid_level >= 0) | 808 | if (c->x86_mask || c->cpuid_level >= 0) |
| 616 | printk(" stepping %02x\n", c->x86_mask); | 809 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
| 617 | else | 810 | else |
| 618 | printk("\n"); | 811 | printk(KERN_CONT "\n"); |
| 812 | |||
| 813 | #ifdef CONFIG_SMP | ||
| 814 | if (c->cpu_index < show_msr) | ||
| 815 | print_cpu_msr(); | ||
| 816 | #else | ||
| 817 | if (show_msr) | ||
| 818 | print_cpu_msr(); | ||
| 819 | #endif | ||
| 619 | } | 820 | } |
| 620 | 821 | ||
| 621 | static __init int setup_disablecpuid(char *arg) | 822 | static __init int setup_disablecpuid(char *arg) |
| @@ -631,19 +832,89 @@ __setup("clearcpuid=", setup_disablecpuid); | |||
| 631 | 832 | ||
| 632 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | 833 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; |
| 633 | 834 | ||
| 634 | void __init early_cpu_init(void) | 835 | #ifdef CONFIG_X86_64 |
| 836 | struct x8664_pda **_cpu_pda __read_mostly; | ||
| 837 | EXPORT_SYMBOL(_cpu_pda); | ||
| 838 | |||
| 839 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | ||
| 840 | |||
| 841 | char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; | ||
| 842 | |||
| 843 | void __cpuinit pda_init(int cpu) | ||
| 844 | { | ||
| 845 | struct x8664_pda *pda = cpu_pda(cpu); | ||
| 846 | |||
| 847 | /* Set up data that may be needed in __get_free_pages early */ | ||
| 848 | loadsegment(fs, 0); | ||
| 849 | loadsegment(gs, 0); | ||
| 850 | /* Memory clobbers used to order PDA accesses */ | ||
| 851 | mb(); | ||
| 852 | wrmsrl(MSR_GS_BASE, pda); | ||
| 853 | mb(); | ||
| 854 | |||
| 855 | pda->cpunumber = cpu; | ||
| 856 | pda->irqcount = -1; | ||
| 857 | pda->kernelstack = (unsigned long)stack_thread_info() - | ||
| 858 | PDA_STACKOFFSET + THREAD_SIZE; | ||
| 859 | pda->active_mm = &init_mm; | ||
| 860 | pda->mmu_state = 0; | ||
| 861 | |||
| 862 | if (cpu == 0) { | ||
| 863 | /* others are initialized in smpboot.c */ | ||
| 864 | pda->pcurrent = &init_task; | ||
| 865 | pda->irqstackptr = boot_cpu_stack; | ||
| 866 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 867 | } else { | ||
| 868 | if (!pda->irqstackptr) { | ||
| 869 | pda->irqstackptr = (char *) | ||
| 870 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | ||
| 871 | if (!pda->irqstackptr) | ||
| 872 | panic("cannot allocate irqstack for cpu %d", | ||
| 873 | cpu); | ||
| 874 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 875 | } | ||
| 876 | |||
| 877 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | ||
| 878 | pda->nodenumber = cpu_to_node(cpu); | ||
| 879 | } | ||
| 880 | } | ||
| 881 | |||
| 882 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | ||
| 883 | DEBUG_STKSZ] __page_aligned_bss; | ||
| 884 | |||
| 885 | extern asmlinkage void ignore_sysret(void); | ||
| 886 | |||
| 887 | /* May not be marked __init: used by software suspend */ | ||
| 888 | void syscall_init(void) | ||
| 635 | { | 889 | { |
| 636 | struct cpu_vendor_dev *cvdev; | 890 | /* |
| 891 | * LSTAR and STAR live in a somewhat strange symbiosis. | ||
| 892 | * They both write to the same internal register. STAR allows setting | ||
| 893 | * CS/DS, but only a 32-bit target. LSTAR sets the 64-bit rip. | ||
| 894 | */ | ||
| 895 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | ||
| 896 | wrmsrl(MSR_LSTAR, system_call); | ||
| 897 | wrmsrl(MSR_CSTAR, ignore_sysret); | ||
| 637 | 898 | ||
| 638 | for (cvdev = __x86cpuvendor_start ; | 899 | #ifdef CONFIG_IA32_EMULATION |
| 639 | cvdev < __x86cpuvendor_end ; | 900 | syscall32_cpu_init(); |
| 640 | cvdev++) | 901 | #endif |
| 641 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
| 642 | 902 | ||
| 643 | early_cpu_detect(); | 903 | /* Flags to clear on syscall */ |
| 644 | validate_pat_support(&boot_cpu_data); | 904 | wrmsrl(MSR_SYSCALL_MASK, |
| 905 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); | ||
| 645 | } | 906 | } |
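To make the STAR/LSTAR comment concrete: STAR packs two selector bases into one MSR, bits 47:32 for SYSCALL (kernel) and bits 63:48 for SYSRET (user). A sketch of the packing with assumed selector values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long user32_cs = 0x23;	/* assumed __USER32_CS */
		unsigned long long kernel_cs = 0x10;	/* assumed __KERNEL_CS */

		/* MSR_STAR: [63:48] SYSRET selector base, [47:32] SYSCALL base */
		unsigned long long star = (user32_cs << 48) | (kernel_cs << 32);

		printf("MSR_STAR = %#018llx\n", star);	/* 0x0023001000000000 */
		return 0;
	}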
| 646 | 907 | ||
| 908 | unsigned long kernel_eflags; | ||
| 909 | |||
| 910 | /* | ||
| 911 | * Copies of the original ist values from the tss are only accessed during | ||
| 912 | * debugging; no special alignment is required. | ||
| 913 | */ | ||
| 914 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | ||
| 915 | |||
| 916 | #else | ||
| 917 | |||
| 647 | /* Make sure %fs is initialized properly in idle threads */ | 918 | /* Make sure %fs is initialized properly in idle threads */ |
| 648 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | 919 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) |
| 649 | { | 920 | { |
| @@ -651,25 +922,136 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | |||
| 651 | regs->fs = __KERNEL_PERCPU; | 922 | regs->fs = __KERNEL_PERCPU; |
| 652 | return regs; | 923 | return regs; |
| 653 | } | 924 | } |
| 654 | 925 | #endif | |
| 655 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
| 656 | * it's on the real one. */ | ||
| 657 | void switch_to_new_gdt(void) | ||
| 658 | { | ||
| 659 | struct desc_ptr gdt_descr; | ||
| 660 | |||
| 661 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
| 662 | gdt_descr.size = GDT_SIZE - 1; | ||
| 663 | load_gdt(&gdt_descr); | ||
| 664 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | ||
| 665 | } | ||
| 666 | 926 | ||
| 667 | /* | 927 | /* |
| 668 | * cpu_init() initializes state that is per-CPU. Some data is already | 928 | * cpu_init() initializes state that is per-CPU. Some data is already |
| 669 | * initialized (naturally) in the bootstrap process, such as the GDT | 929 | * initialized (naturally) in the bootstrap process, such as the GDT |
| 670 | * and IDT. We reload them nevertheless, this function acts as a | 930 | * and IDT. We reload them nevertheless, this function acts as a |
| 671 | * 'CPU state barrier', nothing should get across. | 931 | * 'CPU state barrier', nothing should get across. |
| 932 | * A lot of state is already set up in PDA init for 64 bit | ||
| 672 | */ | 933 | */ |
| 934 | #ifdef CONFIG_X86_64 | ||
| 935 | void __cpuinit cpu_init(void) | ||
| 936 | { | ||
| 937 | int cpu = stack_smp_processor_id(); | ||
| 938 | struct tss_struct *t = &per_cpu(init_tss, cpu); | ||
| 939 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); | ||
| 940 | unsigned long v; | ||
| 941 | char *estacks = NULL; | ||
| 942 | struct task_struct *me; | ||
| 943 | int i; | ||
| 944 | |||
| 945 | /* CPU 0 is initialised in head64.c */ | ||
| 946 | if (cpu != 0) | ||
| 947 | pda_init(cpu); | ||
| 948 | else | ||
| 949 | estacks = boot_exception_stacks; | ||
| 950 | |||
| 951 | me = current; | ||
| 952 | |||
| 953 | if (cpu_test_and_set(cpu, cpu_initialized)) | ||
| 954 | panic("CPU#%d already initialized!\n", cpu); | ||
| 955 | |||
| 956 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | ||
| 957 | |||
| 958 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | ||
| 959 | |||
| 960 | /* | ||
| 961 | * Initialize the per-CPU GDT with the boot GDT, | ||
| 962 | * and set up the GDT descriptor: | ||
| 963 | */ | ||
| 964 | |||
| 965 | switch_to_new_gdt(); | ||
| 966 | load_idt((const struct desc_ptr *)&idt_descr); | ||
| 967 | |||
| 968 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | ||
| 969 | syscall_init(); | ||
| 970 | |||
| 971 | wrmsrl(MSR_FS_BASE, 0); | ||
| 972 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | ||
| 973 | barrier(); | ||
| 974 | |||
| 975 | check_efer(); | ||
| 976 | if (cpu != 0 && x2apic) | ||
| 977 | enable_x2apic(); | ||
| 978 | |||
| 979 | /* | ||
| 980 | * set up and load the per-CPU TSS | ||
| 981 | */ | ||
| 982 | if (!orig_ist->ist[0]) { | ||
| 983 | static const unsigned int order[N_EXCEPTION_STACKS] = { | ||
| 984 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | ||
| 985 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | ||
| 986 | }; | ||
| 987 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | ||
| 988 | if (cpu) { | ||
| 989 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
| 990 | if (!estacks) | ||
| 991 | panic("Cannot allocate exception " | ||
| 992 | "stack %lu %d\n", v, cpu); | ||
| 993 | } | ||
| 994 | estacks += PAGE_SIZE << order[v]; | ||
| 995 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
| 996 | (unsigned long)estacks; | ||
| 997 | } | ||
| 998 | } | ||
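
The pointer arithmetic above is easy to misread: x86 stacks grow downward, so each IST slot must record the *top* of its allocation, which is why estacks is advanced by PAGE_SIZE << order[v] before being stored. A minimal userspace sketch of the same idiom (malloc stands in for __get_free_pages, and the order value is illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        unsigned int order = 1;                 /* illustrative: 2^1 pages */
        size_t size = (size_t)PAGE_SIZE << order;
        char *estack = malloc(size);            /* stands in for __get_free_pages() */

        if (!estack)
            return 1;
        estack += size;                         /* record the top: stacks grow down */
        printf("IST entry would hold %p (top of a %zu-byte stack)\n",
               (void *)estack, size);
        free(estack - size);
        return 0;
    }
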
| 999 | |||
| 1000 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | ||
| 1001 | /* | ||
| 1002 | * <= is required because the CPU will access up to | ||
| 1003 | * 8 bits beyond the end of the IO permission bitmap. | ||
| 1004 | */ | ||
| 1005 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | ||
| 1006 | t->io_bitmap[i] = ~0UL; | ||
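
The <= bound deliberately writes one long past IO_BITMAP_LONGS: since the CPU can fetch permission bits up to 8 bits beyond the bitmap's nominal end, tss_struct keeps a guard slot after the bitmap proper. A standalone sketch of the sizing, with the constants redefined locally to mirror (not quote) the kernel's:

    #include <stdio.h>

    #define IO_BITMAP_BITS   65536                  /* one bit per I/O port */
    #define IO_BITMAP_BYTES  (IO_BITMAP_BITS / 8)   /* 8192 bytes */
    #define IO_BITMAP_LONGS  (IO_BITMAP_BYTES / sizeof(unsigned long))

    int main(void)
    {
        /* one extra trailing long absorbs the CPU's overread */
        static unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
        size_t i;

        for (i = 0; i <= IO_BITMAP_LONGS; i++)
            io_bitmap[i] = ~0UL;                /* all ones: deny every port */

        printf("%zu longs + 1 guard long initialized\n",
               (size_t)IO_BITMAP_LONGS);
        return 0;
    }
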
| 1007 | |||
| 1008 | atomic_inc(&init_mm.mm_count); | ||
| 1009 | me->active_mm = &init_mm; | ||
| 1010 | if (me->mm) | ||
| 1011 | BUG(); | ||
| 1012 | enter_lazy_tlb(&init_mm, me); | ||
| 1013 | |||
| 1014 | load_sp0(t, ¤t->thread); | ||
| 1015 | set_tss_desc(cpu, t); | ||
| 1016 | load_TR_desc(); | ||
| 1017 | load_LDT(&init_mm.context); | ||
| 1018 | |||
| 1019 | #ifdef CONFIG_KGDB | ||
| 1020 | /* | ||
| 1021 | * If kgdb is connected, no debug regs should be altered. This | ||
| 1022 | * is only applicable when KGDB and a KGDB I/O module are built | ||
| 1023 | * into the kernel and you are using early debugging with | ||
| 1024 | * kgdbwait. KGDB will control the kernel HW breakpoint registers. | ||
| 1025 | */ | ||
| 1026 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | ||
| 1027 | arch_kgdb_ops.correct_hw_break(); | ||
| 1028 | else { | ||
| 1029 | #endif | ||
| 1030 | /* | ||
| 1031 | * Clear all 6 debug registers: | ||
| 1032 | */ | ||
| 1033 | |||
| 1034 | set_debugreg(0UL, 0); | ||
| 1035 | set_debugreg(0UL, 1); | ||
| 1036 | set_debugreg(0UL, 2); | ||
| 1037 | set_debugreg(0UL, 3); | ||
| 1038 | set_debugreg(0UL, 6); | ||
| 1039 | set_debugreg(0UL, 7); | ||
| 1040 | #ifdef CONFIG_KGDB | ||
| 1041 | /* If kgdb is connected, no debug regs should be altered. */ | ||
| 1042 | } | ||
| 1043 | #endif | ||
| 1044 | |||
| 1045 | fpu_init(); | ||
| 1046 | |||
| 1047 | raw_local_save_flags(kernel_eflags); | ||
| 1048 | |||
| 1049 | if (is_uv_system()) | ||
| 1050 | uv_cpu_init(); | ||
| 1051 | } | ||
| 1052 | |||
| 1053 | #else | ||
| 1054 | |||
| 673 | void __cpuinit cpu_init(void) | 1055 | void __cpuinit cpu_init(void) |
| 674 | { | 1056 | { |
| 675 | int cpu = smp_processor_id(); | 1057 | int cpu = smp_processor_id(); |
| @@ -723,9 +1105,20 @@ void __cpuinit cpu_init(void) | |||
| 723 | /* | 1105 | /* |
| 724 | * Force FPU initialization: | 1106 | * Force FPU initialization: |
| 725 | */ | 1107 | */ |
| 726 | current_thread_info()->status = 0; | 1108 | if (cpu_has_xsave) |
| 1109 | current_thread_info()->status = TS_XSAVE; | ||
| 1110 | else | ||
| 1111 | current_thread_info()->status = 0; | ||
| 727 | clear_used_math(); | 1112 | clear_used_math(); |
| 728 | mxcsr_feature_mask_init(); | 1113 | mxcsr_feature_mask_init(); |
| 1114 | |||
| 1115 | /* | ||
| 1116 | * Boot processor to setup the FP and extended state context info. | ||
| 1117 | */ | ||
| 1118 | if (!smp_processor_id()) | ||
| 1119 | init_thread_xstate(); | ||
| 1120 | |||
| 1121 | xsave_init(); | ||
| 729 | } | 1122 | } |
| 730 | 1123 | ||
| 731 | #ifdef CONFIG_HOTPLUG_CPU | 1124 | #ifdef CONFIG_HOTPLUG_CPU |
| @@ -739,3 +1132,5 @@ void __cpuinit cpu_uninit(void) | |||
| 739 | per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; | 1132 | per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; |
| 740 | } | 1133 | } |
| 741 | #endif | 1134 | #endif |
| 1135 | |||
| 1136 | #endif | ||
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c deleted file mode 100644 index 43f1aa51da5d..000000000000 --- a/arch/x86/kernel/cpu/common_64.c +++ /dev/null | |||
| @@ -1,765 +0,0 @@ | |||
| 1 | #include <linux/init.h> | ||
| 2 | #include <linux/kernel.h> | ||
| 3 | #include <linux/sched.h> | ||
| 4 | #include <linux/string.h> | ||
| 5 | #include <linux/bootmem.h> | ||
| 6 | #include <linux/bitops.h> | ||
| 7 | #include <linux/module.h> | ||
| 8 | #include <linux/kgdb.h> | ||
| 9 | #include <linux/topology.h> | ||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/smp.h> | ||
| 12 | #include <linux/percpu.h> | ||
| 13 | #include <asm/i387.h> | ||
| 14 | #include <asm/msr.h> | ||
| 15 | #include <asm/io.h> | ||
| 16 | #include <asm/linkage.h> | ||
| 17 | #include <asm/mmu_context.h> | ||
| 18 | #include <asm/mtrr.h> | ||
| 19 | #include <asm/mce.h> | ||
| 20 | #include <asm/pat.h> | ||
| 21 | #include <asm/asm.h> | ||
| 22 | #include <asm/numa.h> | ||
| 23 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 24 | #include <asm/mpspec.h> | ||
| 25 | #include <asm/apic.h> | ||
| 26 | #include <mach_apic.h> | ||
| 27 | #endif | ||
| 28 | #include <asm/pda.h> | ||
| 29 | #include <asm/pgtable.h> | ||
| 30 | #include <asm/processor.h> | ||
| 31 | #include <asm/desc.h> | ||
| 32 | #include <asm/atomic.h> | ||
| 33 | #include <asm/proto.h> | ||
| 34 | #include <asm/sections.h> | ||
| 35 | #include <asm/setup.h> | ||
| 36 | #include <asm/genapic.h> | ||
| 37 | |||
| 38 | #include "cpu.h" | ||
| 39 | |||
| 40 | /* We need valid kernel segments for data and code in long mode too | ||
| 41 | * IRET will check the segment types kkeil 2000/10/28 | ||
| 42 | * Also sysret mandates a special GDT layout | ||
| 43 | */ | ||
| 44 | /* The TLS descriptors are currently at a different place compared to i386. | ||
| 45 | Hopefully nobody expects them at a fixed place (Wine?) */ | ||
| 46 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | ||
| 47 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | ||
| 48 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | ||
| 49 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | ||
| 50 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | ||
| 51 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | ||
| 52 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | ||
| 53 | } }; | ||
| 54 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | ||
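
For readers decoding the packed initializers above: each 8-byte GDT entry is written as two 32-bit words. A sketch that unpacks the 64-bit kernel code descriptor (0x0000ffff / 0x00af9b00) into its architectural fields; the field positions follow the standard segment-descriptor layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lo = 0x0000ffff, hi = 0x00af9b00;   /* GDT_ENTRY_KERNEL_CS */

        uint32_t limit = (lo & 0xffff) | (hi & 0x000f0000);
        uint32_t base  = (lo >> 16) | ((hi & 0xff) << 16) | (hi & 0xff000000);
        unsigned type  = (hi >> 8) & 0xf;   /* 0xb: execute/read, accessed */
        unsigned dpl   = (hi >> 13) & 0x3;
        unsigned l_bit = (hi >> 21) & 0x1;  /* 1: 64-bit code segment */
        unsigned g_bit = (hi >> 23) & 0x1;  /* 1: limit counts 4K pages */

        printf("base=%#x limit=%#x type=%#x dpl=%u L=%u G=%u\n",
               base, limit, type, dpl, l_bit, g_bit);
        return 0;
    }
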
| 55 | |||
| 56 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
| 57 | |||
| 58 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
| 59 | * it's on the real one. */ | ||
| 60 | void switch_to_new_gdt(void) | ||
| 61 | { | ||
| 62 | struct desc_ptr gdt_descr; | ||
| 63 | |||
| 64 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
| 65 | gdt_descr.size = GDT_SIZE - 1; | ||
| 66 | load_gdt(&gdt_descr); | ||
| 67 | } | ||
| 68 | |||
| 69 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | ||
| 70 | |||
| 71 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | ||
| 72 | { | ||
| 73 | display_cacheinfo(c); | ||
| 74 | } | ||
| 75 | |||
| 76 | static struct cpu_dev __cpuinitdata default_cpu = { | ||
| 77 | .c_init = default_init, | ||
| 78 | .c_vendor = "Unknown", | ||
| 79 | }; | ||
| 80 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
| 81 | |||
| 82 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | ||
| 83 | { | ||
| 84 | unsigned int *v; | ||
| 85 | |||
| 86 | if (c->extended_cpuid_level < 0x80000004) | ||
| 87 | return 0; | ||
| 88 | |||
| 89 | v = (unsigned int *) c->x86_model_id; | ||
| 90 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | ||
| 91 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | ||
| 92 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | ||
| 93 | c->x86_model_id[48] = 0; | ||
| 94 | return 1; | ||
| 95 | } | ||
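
get_model_name() simply concatenates the 48 bytes returned by extended leaves 0x80000002..0x80000004. The same fetch can be done from userspace with GCC's <cpuid.h> helper standing in for the kernel's cpuid() wrapper; a sketch:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int v[12];
        char model_id[49];
        int i;

        for (i = 0; i < 3; i++)
            if (!__get_cpuid(0x80000002 + i, &v[4*i], &v[4*i+1],
                             &v[4*i+2], &v[4*i+3]))
                return 1;               /* extended leaves unsupported */

        /* EAX..EDX across the three leaves spell the 48-byte brand string */
        memcpy(model_id, v, 48);
        model_id[48] = 0;
        printf("model: %s\n", model_id);
        return 0;
    }
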
| 96 | |||
| 97 | |||
| 98 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | ||
| 99 | { | ||
| 100 | unsigned int n, dummy, ebx, ecx, edx; | ||
| 101 | |||
| 102 | n = c->extended_cpuid_level; | ||
| 103 | |||
| 104 | if (n >= 0x80000005) { | ||
| 105 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | ||
| 106 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), " | ||
| 107 | "D cache %dK (%d bytes/line)\n", | ||
| 108 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | ||
| 109 | c->x86_cache_size = (ecx>>24) + (edx>>24); | ||
| 110 | /* On K8 L1 TLB is inclusive, so don't count it */ | ||
| 111 | c->x86_tlbsize = 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | if (n >= 0x80000006) { | ||
| 115 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); | ||
| 116 | ecx = cpuid_ecx(0x80000006); | ||
| 117 | c->x86_cache_size = ecx >> 16; | ||
| 118 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
| 119 | |||
| 120 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | ||
| 121 | c->x86_cache_size, ecx & 0xFF); | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 125 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | ||
| 126 | { | ||
| 127 | #ifdef CONFIG_SMP | ||
| 128 | u32 eax, ebx, ecx, edx; | ||
| 129 | int index_msb, core_bits; | ||
| 130 | |||
| 131 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
| 132 | |||
| 133 | |||
| 134 | if (!cpu_has(c, X86_FEATURE_HT)) | ||
| 135 | return; | ||
| 136 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) | ||
| 137 | goto out; | ||
| 138 | |||
| 139 | smp_num_siblings = (ebx & 0xff0000) >> 16; | ||
| 140 | |||
| 141 | if (smp_num_siblings == 1) { | ||
| 142 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | ||
| 143 | } else if (smp_num_siblings > 1) { | ||
| 144 | |||
| 145 | if (smp_num_siblings > NR_CPUS) { | ||
| 146 | printk(KERN_WARNING "CPU: Unsupported number of " | ||
| 147 | "siblings %d", smp_num_siblings); | ||
| 148 | smp_num_siblings = 1; | ||
| 149 | return; | ||
| 150 | } | ||
| 151 | |||
| 152 | index_msb = get_count_order(smp_num_siblings); | ||
| 153 | c->phys_proc_id = phys_pkg_id(index_msb); | ||
| 154 | |||
| 155 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
| 156 | |||
| 157 | index_msb = get_count_order(smp_num_siblings); | ||
| 158 | |||
| 159 | core_bits = get_count_order(c->x86_max_cores); | ||
| 160 | |||
| 161 | c->cpu_core_id = phys_pkg_id(index_msb) & | ||
| 162 | ((1 << core_bits) - 1); | ||
| 163 | } | ||
| 164 | out: | ||
| 165 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | ||
| 166 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
| 167 | c->phys_proc_id); | ||
| 168 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
| 169 | c->cpu_core_id); | ||
| 170 | } | ||
| 171 | |||
| 172 | #endif | ||
| 173 | } | ||
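
detect_ht() is decomposing the initial APIC ID: shifting by get_count_order(siblings) yields the package ID, and the bits between the thread and core boundaries yield the core ID. A userspace sketch of the same arithmetic; the APIC ID and the sibling/core counts are made-up example values, not read from CPUID:

    #include <stdio.h>

    static int get_count_order(unsigned int count)  /* ceil(log2(count)) */
    {
        int order = 0;
        while ((1u << order) < count)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int apicid = 0x1b;                 /* hypothetical APIC ID */
        unsigned int siblings_per_pkg = 8, cores_per_pkg = 4;

        int pkg_shift  = get_count_order(siblings_per_pkg);
        int core_shift = get_count_order(siblings_per_pkg / cores_per_pkg);
        int core_bits  = get_count_order(cores_per_pkg);

        unsigned int phys_proc_id = apicid >> pkg_shift;
        unsigned int cpu_core_id  = (apicid >> core_shift)
                                    & ((1u << core_bits) - 1);

        printf("APIC %#x -> package %u, core %u\n",
               apicid, phys_proc_id, cpu_core_id);
        return 0;
    }
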
| 174 | |||
| 175 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | ||
| 176 | { | ||
| 177 | char *v = c->x86_vendor_id; | ||
| 178 | int i; | ||
| 179 | static int printed; | ||
| 180 | |||
| 181 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
| 182 | if (cpu_devs[i]) { | ||
| 183 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | ||
| 184 | (cpu_devs[i]->c_ident[1] && | ||
| 185 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | ||
| 186 | c->x86_vendor = i; | ||
| 187 | this_cpu = cpu_devs[i]; | ||
| 188 | return; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | } | ||
| 192 | if (!printed) { | ||
| 193 | printed++; | ||
| 194 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | ||
| 195 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | ||
| 196 | } | ||
| 197 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
| 198 | } | ||
| 199 | |||
| 200 | static void __init early_cpu_support_print(void) | ||
| 201 | { | ||
| 202 | int i, j; | ||
| 203 | struct cpu_dev *cpu_devx; | ||
| 204 | |||
| 205 | printk("KERNEL supported cpus:\n"); | ||
| 206 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
| 207 | cpu_devx = cpu_devs[i]; | ||
| 208 | if (!cpu_devx) | ||
| 209 | continue; | ||
| 210 | for (j = 0; j < 2; j++) { | ||
| 211 | if (!cpu_devx->c_ident[j]) | ||
| 212 | continue; | ||
| 213 | printk(" %s %s\n", cpu_devx->c_vendor, | ||
| 214 | cpu_devx->c_ident[j]); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | } | ||
| 218 | |||
| 219 | /* | ||
| 220 | * The NOPL instruction is supposed to exist on all CPUs with | ||
| 221 | * family >= 6; unfortunately, that's not true in practice because | ||
| 222 | * of early VIA chips and (more importantly) broken virtualizers that | ||
| 223 | * are not easy to detect. Hence, probe for it based on first | ||
| 224 | * principles. | ||
| 225 | * | ||
| 226 | * Note: no 64-bit chip is known to lack these, but put the code here | ||
| 227 | * for consistency with 32 bits, and to make it utterly trivial to | ||
| 228 | * diagnose the problem should it ever surface. | ||
| 229 | */ | ||
| 230 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
| 231 | { | ||
| 232 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
| 233 | u32 has_nopl = nopl_signature; | ||
| 234 | |||
| 235 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
| 236 | if (c->x86 >= 6) { | ||
| 237 | asm volatile("\n" | ||
| 238 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
| 239 | "2:\n" | ||
| 240 | " .section .fixup,\"ax\"\n" | ||
| 241 | "3: xor %0,%0\n" | ||
| 242 | " jmp 2b\n" | ||
| 243 | " .previous\n" | ||
| 244 | _ASM_EXTABLE(1b,3b) | ||
| 245 | : "+a" (has_nopl)); | ||
| 246 | |||
| 247 | if (has_nopl == nopl_signature) | ||
| 248 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
| 249 | } | ||
| 250 | } | ||
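
detect_nopl() leans on the kernel's exception-fixup machinery to survive the #UD; in userspace the same probe can be expressed with a SIGILL handler and sigsetjmp(). A rough equivalent (x86 only, and a sketch rather than a drop-in replacement):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf env;

    static void on_sigill(int sig)
    {
        (void)sig;
        siglongjmp(env, 1);             /* escape the faulting instruction */
    }

    int main(void)
    {
        signal(SIGILL, on_sigill);
        if (sigsetjmp(env, 1) == 0) {
            asm volatile(".byte 0x0f, 0x1f, 0xc0");  /* nopl %eax */
            puts("nopl: supported");
        } else {
            puts("nopl: #UD raised, not supported");
        }
        return 0;
    }
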
| 251 | |||
| 252 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); | ||
| 253 | |||
| 254 | void __init early_cpu_init(void) | ||
| 255 | { | ||
| 256 | struct cpu_vendor_dev *cvdev; | ||
| 257 | |||
| 258 | for (cvdev = __x86cpuvendor_start ; | ||
| 259 | cvdev < __x86cpuvendor_end ; | ||
| 260 | cvdev++) | ||
| 261 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
| 262 | early_cpu_support_print(); | ||
| 263 | early_identify_cpu(&boot_cpu_data); | ||
| 264 | } | ||
| 265 | |||
| 266 | /* Do some early cpuid on the boot CPU to get some parameters that are | ||
| 267 | needed before check_bugs. Everything advanced is in identify_cpu | ||
| 268 | below. */ | ||
| 269 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | ||
| 270 | { | ||
| 271 | u32 tfms, xlvl; | ||
| 272 | |||
| 273 | c->loops_per_jiffy = loops_per_jiffy; | ||
| 274 | c->x86_cache_size = -1; | ||
| 275 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
| 276 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | ||
| 277 | c->x86_vendor_id[0] = '\0'; /* Unset */ | ||
| 278 | c->x86_model_id[0] = '\0'; /* Unset */ | ||
| 279 | c->x86_clflush_size = 64; | ||
| 280 | c->x86_cache_alignment = c->x86_clflush_size; | ||
| 281 | c->x86_max_cores = 1; | ||
| 282 | c->x86_coreid_bits = 0; | ||
| 283 | c->extended_cpuid_level = 0; | ||
| 284 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
| 285 | |||
| 286 | /* Get vendor name */ | ||
| 287 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | ||
| 288 | (unsigned int *)&c->x86_vendor_id[0], | ||
| 289 | (unsigned int *)&c->x86_vendor_id[8], | ||
| 290 | (unsigned int *)&c->x86_vendor_id[4]); | ||
| 291 | |||
| 292 | get_cpu_vendor(c); | ||
| 293 | |||
| 294 | /* Initialize the standard set of capabilities */ | ||
| 295 | /* Note that the vendor-specific code below might override */ | ||
| 296 | |||
| 297 | /* Intel-defined flags: level 0x00000001 */ | ||
| 298 | if (c->cpuid_level >= 0x00000001) { | ||
| 299 | __u32 misc; | ||
| 300 | cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4], | ||
| 301 | &c->x86_capability[0]); | ||
| 302 | c->x86 = (tfms >> 8) & 0xf; | ||
| 303 | c->x86_model = (tfms >> 4) & 0xf; | ||
| 304 | c->x86_mask = tfms & 0xf; | ||
| 305 | if (c->x86 == 0xf) | ||
| 306 | c->x86 += (tfms >> 20) & 0xff; | ||
| 307 | if (c->x86 >= 0x6) | ||
| 308 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
| 309 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | ||
| 310 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | ||
| 311 | } else { | ||
| 312 | /* Have CPUID level 0 only - unheard of */ | ||
| 313 | c->x86 = 4; | ||
| 314 | } | ||
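
The family/model/stepping decode above follows the CPUID leaf 1 EAX encoding: the extended family is added only when the base family saturates at 0xf, and the extended model extends families >= 6. A standalone sketch of the same decode; 0x000206a7 is just an example signature value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int tfms = 0x000206a7;          /* example EAX from CPUID(1) */
        unsigned int family = (tfms >> 8) & 0xf;
        unsigned int model  = (tfms >> 4) & 0xf;
        unsigned int step   = tfms & 0xf;

        if (family == 0xf)
            family += (tfms >> 20) & 0xff;       /* extended family */
        if (family >= 0x6)
            model += ((tfms >> 16) & 0xf) << 4;  /* extended model */

        printf("family %#x model %#x stepping %#x\n", family, model, step);
        return 0;
    }
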
| 315 | |||
| 316 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; | ||
| 317 | #ifdef CONFIG_SMP | ||
| 318 | c->phys_proc_id = c->initial_apicid; | ||
| 319 | #endif | ||
| 320 | /* AMD-defined flags: level 0x80000001 */ | ||
| 321 | xlvl = cpuid_eax(0x80000000); | ||
| 322 | c->extended_cpuid_level = xlvl; | ||
| 323 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
| 324 | if (xlvl >= 0x80000001) { | ||
| 325 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
| 326 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
| 327 | } | ||
| 328 | if (xlvl >= 0x80000004) | ||
| 329 | get_model_name(c); /* Default name */ | ||
| 330 | } | ||
| 331 | |||
| 332 | /* Transmeta-defined flags: level 0x80860001 */ | ||
| 333 | xlvl = cpuid_eax(0x80860000); | ||
| 334 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
| 335 | /* Don't set x86_cpuid_level here for now to not confuse. */ | ||
| 336 | if (xlvl >= 0x80860001) | ||
| 337 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
| 338 | } | ||
| 339 | |||
| 340 | if (c->extended_cpuid_level >= 0x80000007) | ||
| 341 | c->x86_power = cpuid_edx(0x80000007); | ||
| 342 | |||
| 343 | if (c->extended_cpuid_level >= 0x80000008) { | ||
| 344 | u32 eax = cpuid_eax(0x80000008); | ||
| 345 | |||
| 346 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
| 347 | c->x86_phys_bits = eax & 0xff; | ||
| 348 | } | ||
| 349 | |||
| 350 | detect_nopl(c); | ||
| 351 | |||
| 352 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | ||
| 353 | cpu_devs[c->x86_vendor]->c_early_init) | ||
| 354 | cpu_devs[c->x86_vendor]->c_early_init(c); | ||
| 355 | |||
| 356 | validate_pat_support(c); | ||
| 357 | } | ||
| 358 | |||
| 359 | /* | ||
| 360 | * This does the hard work of actually picking apart the CPU stuff... | ||
| 361 | */ | ||
| 362 | static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | ||
| 363 | { | ||
| 364 | int i; | ||
| 365 | |||
| 366 | early_identify_cpu(c); | ||
| 367 | |||
| 368 | init_scattered_cpuid_features(c); | ||
| 369 | |||
| 370 | c->apicid = phys_pkg_id(0); | ||
| 371 | |||
| 372 | /* | ||
| 373 | * Vendor-specific initialization. In this section we | ||
| 374 | * canonicalize the feature flags, meaning if there are | ||
| 375 | * features a certain CPU supports which CPUID doesn't | ||
| 376 | * tell us, CPUID claiming incorrect flags, or other bugs, | ||
| 377 | * we handle them here. | ||
| 378 | * | ||
| 379 | * At the end of this section, c->x86_capability better | ||
| 380 | * indicate the features this CPU genuinely supports! | ||
| 381 | */ | ||
| 382 | if (this_cpu->c_init) | ||
| 383 | this_cpu->c_init(c); | ||
| 384 | |||
| 385 | detect_ht(c); | ||
| 386 | |||
| 387 | /* | ||
| 388 | * On SMP, boot_cpu_data holds the common feature set between | ||
| 389 | * all CPUs; so make sure that we indicate which features are | ||
| 390 | * common between the CPUs. The first time this routine gets | ||
| 391 | * executed, c == &boot_cpu_data. | ||
| 392 | */ | ||
| 393 | if (c != &boot_cpu_data) { | ||
| 394 | /* AND the already accumulated flags with these */ | ||
| 395 | for (i = 0; i < NCAPINTS; i++) | ||
| 396 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | ||
| 397 | } | ||
| 398 | |||
| 399 | /* Clear all flags overridden by options */ | ||
| 400 | for (i = 0; i < NCAPINTS; i++) | ||
| 401 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; | ||
| 402 | |||
| 403 | #ifdef CONFIG_X86_MCE | ||
| 404 | mcheck_init(c); | ||
| 405 | #endif | ||
| 406 | select_idle_routine(c); | ||
| 407 | |||
| 408 | #ifdef CONFIG_NUMA | ||
| 409 | numa_add_cpu(smp_processor_id()); | ||
| 410 | #endif | ||
| 411 | |||
| 412 | } | ||
| 413 | |||
| 414 | void __cpuinit identify_boot_cpu(void) | ||
| 415 | { | ||
| 416 | identify_cpu(&boot_cpu_data); | ||
| 417 | } | ||
| 418 | |||
| 419 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | ||
| 420 | { | ||
| 421 | BUG_ON(c == &boot_cpu_data); | ||
| 422 | identify_cpu(c); | ||
| 423 | mtrr_ap_init(); | ||
| 424 | } | ||
| 425 | |||
| 426 | static __init int setup_noclflush(char *arg) | ||
| 427 | { | ||
| 428 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | ||
| 429 | return 1; | ||
| 430 | } | ||
| 431 | __setup("noclflush", setup_noclflush); | ||
| 432 | |||
| 433 | struct msr_range { | ||
| 434 | unsigned min; | ||
| 435 | unsigned max; | ||
| 436 | }; | ||
| 437 | |||
| 438 | static struct msr_range msr_range_array[] __cpuinitdata = { | ||
| 439 | { 0x00000000, 0x00000418}, | ||
| 440 | { 0xc0000000, 0xc000040b}, | ||
| 441 | { 0xc0010000, 0xc0010142}, | ||
| 442 | { 0xc0011000, 0xc001103b}, | ||
| 443 | }; | ||
| 444 | |||
| 445 | static void __cpuinit print_cpu_msr(void) | ||
| 446 | { | ||
| 447 | unsigned index; | ||
| 448 | u64 val; | ||
| 449 | int i; | ||
| 450 | unsigned index_min, index_max; | ||
| 451 | |||
| 452 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | ||
| 453 | index_min = msr_range_array[i].min; | ||
| 454 | index_max = msr_range_array[i].max; | ||
| 455 | for (index = index_min; index < index_max; index++) { | ||
| 456 | if (rdmsrl_amd_safe(index, &val)) | ||
| 457 | continue; | ||
| 458 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | ||
| 459 | } | ||
| 460 | } | ||
| 461 | } | ||
| 462 | |||
| 463 | static int show_msr __cpuinitdata; | ||
| 464 | static __init int setup_show_msr(char *arg) | ||
| 465 | { | ||
| 466 | int num; | ||
| 467 | |||
| 468 | get_option(&arg, &num); | ||
| 469 | |||
| 470 | if (num > 0) | ||
| 471 | show_msr = num; | ||
| 472 | return 1; | ||
| 473 | } | ||
| 474 | __setup("show_msr=", setup_show_msr); | ||
| 475 | |||
| 476 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | ||
| 477 | { | ||
| 478 | if (c->x86_model_id[0]) | ||
| 479 | printk(KERN_CONT "%s", c->x86_model_id); | ||
| 480 | |||
| 481 | if (c->x86_mask || c->cpuid_level >= 0) | ||
| 482 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | ||
| 483 | else | ||
| 484 | printk(KERN_CONT "\n"); | ||
| 485 | |||
| 486 | #ifdef CONFIG_SMP | ||
| 487 | if (c->cpu_index < show_msr) | ||
| 488 | print_cpu_msr(); | ||
| 489 | #else | ||
| 490 | if (show_msr) | ||
| 491 | print_cpu_msr(); | ||
| 492 | #endif | ||
| 493 | } | ||
| 494 | |||
| 495 | static __init int setup_disablecpuid(char *arg) | ||
| 496 | { | ||
| 497 | int bit; | ||
| 498 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) | ||
| 499 | setup_clear_cpu_cap(bit); | ||
| 500 | else | ||
| 501 | return 0; | ||
| 502 | return 1; | ||
| 503 | } | ||
| 504 | __setup("clearcpuid=", setup_disablecpuid); | ||
| 505 | |||
| 506 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | ||
| 507 | |||
| 508 | struct x8664_pda **_cpu_pda __read_mostly; | ||
| 509 | EXPORT_SYMBOL(_cpu_pda); | ||
| 510 | |||
| 511 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | ||
| 512 | |||
| 513 | char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; | ||
| 514 | |||
| 515 | unsigned long __supported_pte_mask __read_mostly = ~0UL; | ||
| 516 | EXPORT_SYMBOL_GPL(__supported_pte_mask); | ||
| 517 | |||
| 518 | static int do_not_nx __cpuinitdata; | ||
| 519 | |||
| 520 | /* noexec=on|off | ||
| 521 | Control non-executable mappings for 64-bit processes. | ||
| 522 | |||
| 523 | on Enable (default) | ||
| 524 | off Disable | ||
| 525 | */ | ||
| 526 | static int __init nonx_setup(char *str) | ||
| 527 | { | ||
| 528 | if (!str) | ||
| 529 | return -EINVAL; | ||
| 530 | if (!strncmp(str, "on", 2)) { | ||
| 531 | __supported_pte_mask |= _PAGE_NX; | ||
| 532 | do_not_nx = 0; | ||
| 533 | } else if (!strncmp(str, "off", 3)) { | ||
| 534 | do_not_nx = 1; | ||
| 535 | __supported_pte_mask &= ~_PAGE_NX; | ||
| 536 | } | ||
| 537 | return 0; | ||
| 538 | } | ||
| 539 | early_param("noexec", nonx_setup); | ||
| 540 | |||
| 541 | int force_personality32; | ||
| 542 | |||
| 543 | /* noexec32=on|off | ||
| 544 | Control non-executable heap for 32-bit processes. | ||
| 545 | To control the stack too, use noexec=off | ||
| 546 | |||
| 547 | on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) | ||
| 548 | off PROT_READ implies PROT_EXEC | ||
| 549 | */ | ||
| 550 | static int __init nonx32_setup(char *str) | ||
| 551 | { | ||
| 552 | if (!strcmp(str, "on")) | ||
| 553 | force_personality32 &= ~READ_IMPLIES_EXEC; | ||
| 554 | else if (!strcmp(str, "off")) | ||
| 555 | force_personality32 |= READ_IMPLIES_EXEC; | ||
| 556 | return 1; | ||
| 557 | } | ||
| 558 | __setup("noexec32=", nonx32_setup); | ||
| 559 | |||
| 560 | void pda_init(int cpu) | ||
| 561 | { | ||
| 562 | struct x8664_pda *pda = cpu_pda(cpu); | ||
| 563 | |||
| 564 | /* Set up data that may be needed in __get_free_pages early */ | ||
| 565 | loadsegment(fs, 0); | ||
| 566 | loadsegment(gs, 0); | ||
| 567 | /* Memory clobbers used to order PDA accesses */ | ||
| 568 | mb(); | ||
| 569 | wrmsrl(MSR_GS_BASE, pda); | ||
| 570 | mb(); | ||
| 571 | |||
| 572 | pda->cpunumber = cpu; | ||
| 573 | pda->irqcount = -1; | ||
| 574 | pda->kernelstack = (unsigned long)stack_thread_info() - | ||
| 575 | PDA_STACKOFFSET + THREAD_SIZE; | ||
| 576 | pda->active_mm = &init_mm; | ||
| 577 | pda->mmu_state = 0; | ||
| 578 | |||
| 579 | if (cpu == 0) { | ||
| 580 | /* others are initialized in smpboot.c */ | ||
| 581 | pda->pcurrent = &init_task; | ||
| 582 | pda->irqstackptr = boot_cpu_stack; | ||
| 583 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 584 | } else { | ||
| 585 | if (!pda->irqstackptr) { | ||
| 586 | pda->irqstackptr = (char *) | ||
| 587 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | ||
| 588 | if (!pda->irqstackptr) | ||
| 589 | panic("cannot allocate irqstack for cpu %d", | ||
| 590 | cpu); | ||
| 591 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 592 | } | ||
| 593 | |||
| 594 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | ||
| 595 | pda->nodenumber = cpu_to_node(cpu); | ||
| 596 | } | ||
| 597 | } | ||
| 598 | |||
| 599 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | ||
| 600 | DEBUG_STKSZ] __page_aligned_bss; | ||
| 601 | |||
| 602 | extern asmlinkage void ignore_sysret(void); | ||
| 603 | |||
| 604 | /* May not be marked __init: used by software suspend */ | ||
| 605 | void syscall_init(void) | ||
| 606 | { | ||
| 607 | /* | ||
| 608 | * LSTAR and STAR live in a somewhat strange symbiosis. | ||
| 609 | * They both write to the same internal register. STAR allows setting | ||
| 610 | * CS/DS, but only a 32-bit target. LSTAR sets the 64-bit rip. | ||
| 611 | */ | ||
| 612 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | ||
| 613 | wrmsrl(MSR_LSTAR, system_call); | ||
| 614 | wrmsrl(MSR_CSTAR, ignore_sysret); | ||
| 615 | |||
| 616 | #ifdef CONFIG_IA32_EMULATION | ||
| 617 | syscall32_cpu_init(); | ||
| 618 | #endif | ||
| 619 | |||
| 620 | /* Flags to clear on syscall */ | ||
| 621 | wrmsrl(MSR_SYSCALL_MASK, | ||
| 622 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); | ||
| 623 | } | ||
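
The packing the comment describes is: bits 63..48 of MSR_STAR hold the selector base SYSRET uses for the 32-bit user CS/SS, and bits 47..32 hold the base SYSCALL loads for kernel CS/SS. A sketch of the arithmetic with illustrative selector values (the real __KERNEL_CS/__USER32_CS follow from the GDT layout, not these numbers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t kernel_cs = 0x10;   /* hypothetical GDT selectors */
        uint64_t user32_cs = 0x23;

        /* bits 63..48: SYSRET CS/SS base; bits 47..32: SYSCALL CS/SS base */
        uint64_t star = (user32_cs << 48) | (kernel_cs << 32);

        printf("MSR_STAR = %#018llx\n", (unsigned long long)star);
        return 0;
    }
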
| 624 | |||
| 625 | void __cpuinit check_efer(void) | ||
| 626 | { | ||
| 627 | unsigned long efer; | ||
| 628 | |||
| 629 | rdmsrl(MSR_EFER, efer); | ||
| 630 | if (!(efer & EFER_NX) || do_not_nx) | ||
| 631 | __supported_pte_mask &= ~_PAGE_NX; | ||
| 632 | } | ||
| 633 | |||
| 634 | unsigned long kernel_eflags; | ||
| 635 | |||
| 636 | /* | ||
| 637 | * Copies of the original ist values from the tss are only accessed during | ||
| 638 | * debugging, no special alignment required. | ||
| 639 | */ | ||
| 640 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | ||
| 641 | |||
| 642 | /* | ||
| 643 | * cpu_init() initializes state that is per-CPU. Some data is already | ||
| 644 | * initialized (naturally) in the bootstrap process, such as the GDT | ||
| 645 | * and IDT. We reload them nevertheless, this function acts as a | ||
| 646 | * 'CPU state barrier', nothing should get across. | ||
| 647 | * A lot of state is already set up in PDA init. | ||
| 648 | */ | ||
| 649 | void __cpuinit cpu_init(void) | ||
| 650 | { | ||
| 651 | int cpu = stack_smp_processor_id(); | ||
| 652 | struct tss_struct *t = &per_cpu(init_tss, cpu); | ||
| 653 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); | ||
| 654 | unsigned long v; | ||
| 655 | char *estacks = NULL; | ||
| 656 | struct task_struct *me; | ||
| 657 | int i; | ||
| 658 | |||
| 659 | /* CPU 0 is initialised in head64.c */ | ||
| 660 | if (cpu != 0) | ||
| 661 | pda_init(cpu); | ||
| 662 | else | ||
| 663 | estacks = boot_exception_stacks; | ||
| 664 | |||
| 665 | me = current; | ||
| 666 | |||
| 667 | if (cpu_test_and_set(cpu, cpu_initialized)) | ||
| 668 | panic("CPU#%d already initialized!\n", cpu); | ||
| 669 | |||
| 670 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | ||
| 671 | |||
| 672 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | ||
| 673 | |||
| 674 | /* | ||
| 675 | * Initialize the per-CPU GDT with the boot GDT, | ||
| 676 | * and set up the GDT descriptor: | ||
| 677 | */ | ||
| 678 | |||
| 679 | switch_to_new_gdt(); | ||
| 680 | load_idt((const struct desc_ptr *)&idt_descr); | ||
| 681 | |||
| 682 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | ||
| 683 | syscall_init(); | ||
| 684 | |||
| 685 | wrmsrl(MSR_FS_BASE, 0); | ||
| 686 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | ||
| 687 | barrier(); | ||
| 688 | |||
| 689 | check_efer(); | ||
| 690 | if (cpu != 0 && x2apic) | ||
| 691 | enable_x2apic(); | ||
| 692 | |||
| 693 | /* | ||
| 694 | * set up and load the per-CPU TSS | ||
| 695 | */ | ||
| 696 | if (!orig_ist->ist[0]) { | ||
| 697 | static const unsigned int order[N_EXCEPTION_STACKS] = { | ||
| 698 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | ||
| 699 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | ||
| 700 | }; | ||
| 701 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | ||
| 702 | if (cpu) { | ||
| 703 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
| 704 | if (!estacks) | ||
| 705 | panic("Cannot allocate exception " | ||
| 706 | "stack %ld %d\n", v, cpu); | ||
| 707 | } | ||
| 708 | estacks += PAGE_SIZE << order[v]; | ||
| 709 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
| 710 | (unsigned long)estacks; | ||
| 711 | } | ||
| 712 | } | ||
| 713 | |||
| 714 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | ||
| 715 | /* | ||
| 716 | * <= is required because the CPU will access up to | ||
| 717 | * 8 bits beyond the end of the IO permission bitmap. | ||
| 718 | */ | ||
| 719 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | ||
| 720 | t->io_bitmap[i] = ~0UL; | ||
| 721 | |||
| 722 | atomic_inc(&init_mm.mm_count); | ||
| 723 | me->active_mm = &init_mm; | ||
| 724 | if (me->mm) | ||
| 725 | BUG(); | ||
| 726 | enter_lazy_tlb(&init_mm, me); | ||
| 727 | |||
| 728 | load_sp0(t, ¤t->thread); | ||
| 729 | set_tss_desc(cpu, t); | ||
| 730 | load_TR_desc(); | ||
| 731 | load_LDT(&init_mm.context); | ||
| 732 | |||
| 733 | #ifdef CONFIG_KGDB | ||
| 734 | /* | ||
| 735 | * If the kgdb is connected no debug regs should be altered. This | ||
| 736 | * is only applicable when KGDB and a KGDB I/O module are built | ||
| 737 | * into the kernel and you are using early debugging with | ||
| 738 | * kgdbwait. KGDB will control the kernel HW breakpoint registers. | ||
| 739 | */ | ||
| 740 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | ||
| 741 | arch_kgdb_ops.correct_hw_break(); | ||
| 742 | else { | ||
| 743 | #endif | ||
| 744 | /* | ||
| 745 | * Clear all 6 debug registers: | ||
| 746 | */ | ||
| 747 | |||
| 748 | set_debugreg(0UL, 0); | ||
| 749 | set_debugreg(0UL, 1); | ||
| 750 | set_debugreg(0UL, 2); | ||
| 751 | set_debugreg(0UL, 3); | ||
| 752 | set_debugreg(0UL, 6); | ||
| 753 | set_debugreg(0UL, 7); | ||
| 754 | #ifdef CONFIG_KGDB | ||
| 755 | /* If the kgdb is connected no debug regs should be altered. */ | ||
| 756 | } | ||
| 757 | #endif | ||
| 758 | |||
| 759 | fpu_init(); | ||
| 760 | |||
| 761 | raw_local_save_flags(kernel_eflags); | ||
| 762 | |||
| 763 | if (is_uv_system()) | ||
| 764 | uv_cpu_init(); | ||
| 765 | } | ||
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 4d894e8565fe..de4094a39210 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
| @@ -21,23 +21,16 @@ struct cpu_dev { | |||
| 21 | void (*c_init)(struct cpuinfo_x86 * c); | 21 | void (*c_init)(struct cpuinfo_x86 * c); |
| 22 | void (*c_identify)(struct cpuinfo_x86 * c); | 22 | void (*c_identify)(struct cpuinfo_x86 * c); |
| 23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); | 23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); |
| 24 | int c_x86_vendor; | ||
| 24 | }; | 25 | }; |
| 25 | 26 | ||
| 26 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; | 27 | #define cpu_dev_register(cpu_devX) \ |
| 28 | static struct cpu_dev *__cpu_dev_##cpu_devX __used \ | ||
| 29 | __attribute__((__section__(".x86_cpu_dev.init"))) = \ | ||
| 30 | &cpu_devX; | ||
| 27 | 31 | ||
| 28 | struct cpu_vendor_dev { | 32 | extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[]; |
| 29 | int vendor; | ||
| 30 | struct cpu_dev *cpu_dev; | ||
| 31 | }; | ||
| 32 | |||
| 33 | #define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \ | ||
| 34 | static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \ | ||
| 35 | __attribute__((__section__(".x86cpuvendor.init"))) = \ | ||
| 36 | { cpu_vendor_id, cpu_dev } | ||
| 37 | |||
| 38 | extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[]; | ||
| 39 | 33 | ||
| 40 | extern int get_model_name(struct cpuinfo_x86 *c); | ||
| 41 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 34 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
| 42 | 35 | ||
| 43 | #endif | 36 | #endif |
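
cpu_dev_register() works by dropping a pointer into a dedicated ELF section; the kernel's linker script then bounds that section with __x86_cpu_dev_start/__x86_cpu_dev_end so the init code can walk every registered vendor without a central list. A userspace sketch of the same pattern, leaning on the __start_/__stop_ symbols GNU ld synthesizes for any section whose name is a valid C identifier (the kernel's ".x86_cpu_dev.init" name needs an explicit linker script instead):

    #include <stdio.h>

    struct cpu_dev {
        const char *c_vendor;
    };

    #define cpu_dev_register(dev) \
        static const struct cpu_dev *__ptr_##dev \
        __attribute__((__section__("x86_cpu_dev_init"), used)) = &dev

    static const struct cpu_dev intel_dev = { .c_vendor = "Intel" };
    static const struct cpu_dev cyrix_dev = { .c_vendor = "Cyrix" };
    cpu_dev_register(intel_dev);
    cpu_dev_register(cyrix_dev);

    /* GNU ld provides these bounds automatically for the section above */
    extern const struct cpu_dev *__start_x86_cpu_dev_init[];
    extern const struct cpu_dev *__stop_x86_cpu_dev_init[];

    int main(void)
    {
        const struct cpu_dev **p;

        for (p = __start_x86_cpu_dev_init; p < __stop_x86_cpu_dev_init; p++)
            printf("registered vendor: %s\n", (*p)->c_vendor);
        return 0;
    }
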
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 898a5a2002ed..ffd0f5ed071a 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
| @@ -121,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void) | |||
| 121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
| 122 | 122 | ||
| 123 | /* Load/Store Serialize to mem access disable (=reorder it) */ | 123 | /* Load/Store Serialize to mem access disable (=reorder it) */ |
| 124 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); | 124 | setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); |
| 125 | /* set load/store serialize from 1GB to 4GB */ | 125 | /* set load/store serialize from 1GB to 4GB */ |
| 126 | ccr3 |= 0xe0; | 126 | ccr3 |= 0xe0; |
| 127 | setCx86(CX86_CCR3, ccr3); | 127 | setCx86(CX86_CCR3, ccr3); |
| @@ -132,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void) | |||
| 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
| 133 | 133 | ||
| 134 | /* CCR2 bit 2: unlock NW bit */ | 134 | /* CCR2 bit 2: unlock NW bit */ |
| 135 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); | 135 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); |
| 136 | /* set 'Not Write-through' */ | 136 | /* set 'Not Write-through' */ |
| 137 | write_cr0(read_cr0() | X86_CR0_NW); | 137 | write_cr0(read_cr0() | X86_CR0_NW); |
| 138 | /* CCR2 bit 2: lock NW bit and set WT1 */ | 138 | /* CCR2 bit 2: lock NW bit and set WT1 */ |
| 139 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); | 139 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | /* | 142 | /* |
| @@ -150,14 +150,14 @@ static void __cpuinit geode_configure(void) | |||
| 150 | local_irq_save(flags); | 150 | local_irq_save(flags); |
| 151 | 151 | ||
| 152 | /* Suspend on halt power saving and enable #SUSP pin */ | 152 | /* Suspend on halt power saving and enable #SUSP pin */ |
| 153 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); | 153 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); |
| 154 | 154 | ||
| 155 | ccr3 = getCx86(CX86_CCR3); | 155 | ccr3 = getCx86(CX86_CCR3); |
| 156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
| 157 | 157 | ||
| 158 | 158 | ||
| 159 | /* FPU fast, DTE cache, Mem bypass */ | 159 | /* FPU fast, DTE cache, Mem bypass */ |
| 160 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); | 160 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); |
| 161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
| 162 | 162 | ||
| 163 | set_cx86_memwb(); | 163 | set_cx86_memwb(); |
| @@ -291,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
| 291 | /* GXm supports extended cpuid levels 'ala' AMD */ | 291 | /* GXm supports extended cpuid levels 'ala' AMD */ |
| 292 | if (c->cpuid_level == 2) { | 292 | if (c->cpuid_level == 2) { |
| 293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ | 293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ |
| 294 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); | 294 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); |
| 295 | 295 | ||
| 296 | /* | 296 | /* |
| 297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 | 297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 |
| @@ -301,7 +301,6 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
| 301 | */ | 301 | */ |
| 302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) | 302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) |
| 303 | geode_configure(); | 303 | geode_configure(); |
| 304 | get_model_name(c); /* get CPU marketing name */ | ||
| 305 | return; | 304 | return; |
| 306 | } else { /* MediaGX */ | 305 | } else { /* MediaGX */ |
| 307 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; | 306 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; |
| @@ -314,7 +313,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
| 314 | if (dir1 > 7) { | 313 | if (dir1 > 7) { |
| 315 | dir0_msn++; /* M II */ | 314 | dir0_msn++; /* M II */ |
| 316 | /* Enable MMX extensions (App note 108) */ | 315 | /* Enable MMX extensions (App note 108) */ |
| 317 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); | 316 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); |
| 318 | } else { | 317 | } else { |
| 319 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ | 318 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ |
| 320 | } | 319 | } |
| @@ -429,7 +428,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
| 429 | local_irq_save(flags); | 428 | local_irq_save(flags); |
| 430 | ccr3 = getCx86(CX86_CCR3); | 429 | ccr3 = getCx86(CX86_CCR3); |
| 431 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 430 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
| 432 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */ | 431 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ |
| 433 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 432 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
| 434 | local_irq_restore(flags); | 433 | local_irq_restore(flags); |
| 435 | } | 434 | } |
| @@ -442,14 +441,16 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | |||
| 442 | .c_early_init = early_init_cyrix, | 441 | .c_early_init = early_init_cyrix, |
| 443 | .c_init = init_cyrix, | 442 | .c_init = init_cyrix, |
| 444 | .c_identify = cyrix_identify, | 443 | .c_identify = cyrix_identify, |
| 444 | .c_x86_vendor = X86_VENDOR_CYRIX, | ||
| 445 | }; | 445 | }; |
| 446 | 446 | ||
| 447 | cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); | 447 | cpu_dev_register(cyrix_cpu_dev); |
| 448 | 448 | ||
| 449 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { | 449 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { |
| 450 | .c_vendor = "NSC", | 450 | .c_vendor = "NSC", |
| 451 | .c_ident = { "Geode by NSC" }, | 451 | .c_ident = { "Geode by NSC" }, |
| 452 | .c_init = init_nsc, | 452 | .c_init = init_nsc, |
| 453 | .c_x86_vendor = X86_VENDOR_NSC, | ||
| 453 | }; | 454 | }; |
| 454 | 455 | ||
| 455 | cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev); | 456 | cpu_dev_register(nsc_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c deleted file mode 100644 index b96b69545fbf..000000000000 --- a/arch/x86/kernel/cpu/feature_names.c +++ /dev/null | |||
| @@ -1,84 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Strings for the various x86 capability flags. | ||
| 3 | * | ||
| 4 | * This file must not contain any executable code. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <asm/cpufeature.h> | ||
| 8 | |||
| 9 | /* | ||
| 10 | * These flag bits must match the definitions in <asm/cpufeature.h>. | ||
| 11 | * NULL means this bit is undefined or reserved; either way it doesn't | ||
| 12 | * have meaning as far as Linux is concerned. Note that it's important | ||
| 13 | * to realize there is a difference between this table and CPUID -- if | ||
| 14 | * applications want to get the raw CPUID data, they should access | ||
| 15 | * /dev/cpu/<cpu_nr>/cpuid instead. | ||
| 16 | */ | ||
| 17 | const char * const x86_cap_flags[NCAPINTS*32] = { | ||
| 18 | /* Intel-defined */ | ||
| 19 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", | ||
| 20 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", | ||
| 21 | "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", | ||
| 22 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", | ||
| 23 | |||
| 24 | /* AMD-defined */ | ||
| 25 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 26 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | ||
| 27 | NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, | ||
| 28 | NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", | ||
| 29 | "3dnowext", "3dnow", | ||
| 30 | |||
| 31 | /* Transmeta-defined */ | ||
| 32 | "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, | ||
| 33 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 34 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 35 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 36 | |||
| 37 | /* Other (Linux-defined) */ | ||
| 38 | "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", | ||
| 39 | NULL, NULL, NULL, NULL, | ||
| 40 | "constant_tsc", "up", NULL, "arch_perfmon", | ||
| 41 | "pebs", "bts", NULL, NULL, | ||
| 42 | "rep_good", NULL, NULL, NULL, | ||
| 43 | "nopl", NULL, NULL, NULL, | ||
| 44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 45 | |||
| 46 | /* Intel-defined (#2) */ | ||
| 47 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", | ||
| 48 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, | ||
| 49 | NULL, NULL, "dca", "sse4_1", "sse4_2", "x2apic", NULL, "popcnt", | ||
| 50 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 51 | |||
| 52 | /* VIA/Cyrix/Centaur-defined */ | ||
| 53 | NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", | ||
| 54 | "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, | ||
| 55 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 56 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 57 | |||
| 58 | /* AMD-defined (#2) */ | ||
| 59 | "lahf_lm", "cmp_legacy", "svm", "extapic", | ||
| 60 | "cr8_legacy", "abm", "sse4a", "misalignsse", | ||
| 61 | "3dnowprefetch", "osvw", "ibs", "sse5", | ||
| 62 | "skinit", "wdt", NULL, NULL, | ||
| 63 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 64 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 65 | |||
| 66 | /* Auxiliary (Linux-defined) */ | ||
| 67 | "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 68 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 69 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 70 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
| 71 | }; | ||
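
The comment at the top of this table points applications at /dev/cpu/<cpu_nr>/cpuid rather than at these strings. That interface treats the file offset as the CPUID leaf and returns the four registers per 16-byte read (the cpuid driver must be loaded); a sketch of its use:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint32_t regs[4];
        int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

        /* offset selects the leaf: here 0x1, the feature-flag leaf */
        if (fd < 0 || pread(fd, regs, 16, 1) != 16) {
            perror("cpuid");
            return 1;
        }
        printf("leaf 1: eax=%08x edx=%08x (feature bits)\n",
               regs[0], regs[3]);
        close(fd);
        return 0;
    }
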
| 72 | |||
| 73 | const char *const x86_power_flags[32] = { | ||
| 74 | "ts", /* temperature sensor */ | ||
| 75 | "fid", /* frequency id control */ | ||
| 76 | "vid", /* voltage id control */ | ||
| 77 | "ttp", /* thermal trip */ | ||
| 78 | "tm", | ||
| 79 | "stc", | ||
| 80 | "100mhzsteps", | ||
| 81 | "hwpstate", | ||
| 82 | "", /* tsc invariant mapped to constant_tsc */ | ||
| 83 | /* nothing */ | ||
| 84 | }; | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index f113ef4595f6..99468dbd08da 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -15,6 +15,11 @@ | |||
| 15 | #include <asm/ds.h> | 15 | #include <asm/ds.h> |
| 16 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_X86_64 | ||
| 19 | #include <asm/topology.h> | ||
| 20 | #include <asm/numa_64.h> | ||
| 21 | #endif | ||
| 22 | |||
| 18 | #include "cpu.h" | 23 | #include "cpu.h" |
| 19 | 24 | ||
| 20 | #ifdef CONFIG_X86_LOCAL_APIC | 25 | #ifdef CONFIG_X86_LOCAL_APIC |
| @@ -23,23 +28,22 @@ | |||
| 23 | #include <mach_apic.h> | 28 | #include <mach_apic.h> |
| 24 | #endif | 29 | #endif |
| 25 | 30 | ||
| 26 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
| 27 | /* | ||
| 28 | * Alignment at which movsl is preferred for bulk memory copies. | ||
| 29 | */ | ||
| 30 | struct movsl_mask movsl_mask __read_mostly; | ||
| 31 | #endif | ||
| 32 | |||
| 33 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 31 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
| 34 | { | 32 | { |
| 35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | ||
| 36 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | ||
| 37 | c->x86_cache_alignment = 128; | ||
| 38 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | 33 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || |
| 39 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | 34 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) |
| 40 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 35 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
| 36 | |||
| 37 | #ifdef CONFIG_X86_64 | ||
| 38 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); | ||
| 39 | #else | ||
| 40 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | ||
| 41 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | ||
| 42 | c->x86_cache_alignment = 128; | ||
| 43 | #endif | ||
| 41 | } | 44 | } |
| 42 | 45 | ||
| 46 | #ifdef CONFIG_X86_32 | ||
| 43 | /* | 47 | /* |
| 44 | * Early probe support logic for ppro memory erratum #50 | 48 | * Early probe support logic for ppro memory erratum #50 |
| 45 | * | 49 | * |
| @@ -59,15 +63,54 @@ int __cpuinit ppro_with_ram_bug(void) | |||
| 59 | return 0; | 63 | return 0; |
| 60 | } | 64 | } |
| 61 | 65 | ||
| 66 | #ifdef CONFIG_X86_F00F_BUG | ||
| 67 | static void __cpuinit trap_init_f00f_bug(void) | ||
| 68 | { | ||
| 69 | __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); | ||
| 62 | 70 | ||
| 63 | /* | 71 | /* |
| 64 | * P4 Xeon errata 037 workaround. | 72 | * Update the IDT descriptor and reload the IDT so that |
| 65 | * Hardware prefetcher may cause stale data to be loaded into the cache. | 73 | * it uses the read-only mapped virtual address. |
| 66 | */ | 74 | */ |
| 67 | static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | 75 | idt_descr.address = fix_to_virt(FIX_F00F_IDT); |
| 76 | load_idt(&idt_descr); | ||
| 77 | } | ||
| 78 | #endif | ||
| 79 | |||
| 80 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | ||
| 68 | { | 81 | { |
| 69 | unsigned long lo, hi; | 82 | unsigned long lo, hi; |
| 70 | 83 | ||
| 84 | #ifdef CONFIG_X86_F00F_BUG | ||
| 85 | /* | ||
| 86 | * All current models of Pentium and Pentium with MMX technology CPUs | ||
| 87 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | ||
| 88 | * Note that the workaround only should be initialized once... | ||
| 89 | */ | ||
| 90 | c->f00f_bug = 0; | ||
| 91 | if (!paravirt_enabled() && c->x86 == 5) { | ||
| 92 | static int f00f_workaround_enabled; | ||
| 93 | |||
| 94 | c->f00f_bug = 1; | ||
| 95 | if (!f00f_workaround_enabled) { | ||
| 96 | trap_init_f00f_bug(); | ||
| 97 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | ||
| 98 | f00f_workaround_enabled = 1; | ||
| 99 | } | ||
| 100 | } | ||
| 101 | #endif | ||
| 102 | |||
| 103 | /* | ||
| 104 | * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until | ||
| 105 | * model 3 mask 3 | ||
| 106 | */ | ||
| 107 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | ||
| 108 | clear_cpu_cap(c, X86_FEATURE_SEP); | ||
| 109 | |||
| 110 | /* | ||
| 111 | * P4 Xeon errata 037 workaround. | ||
| 112 | * Hardware prefetcher may cause stale data to be loaded into the cache. | ||
| 113 | */ | ||
| 71 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | 114 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
| 72 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); | 115 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
| 73 | if ((lo & (1<<9)) == 0) { | 116 | if ((lo & (1<<9)) == 0) { |
| @@ -77,13 +120,68 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | |||
| 77 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 120 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); |
| 78 | } | 121 | } |
| 79 | } | 122 | } |
| 123 | |||
| 124 | /* | ||
| 125 | * See if we have a good local APIC by checking for buggy Pentia, | ||
| 126 | * i.e. all B steppings and the C2 stepping of P54C when using their | ||
| 127 | * integrated APIC (see 11AP erratum in "Pentium Processor | ||
| 128 | * Specification Update"). | ||
| 129 | */ | ||
| 130 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && | ||
| 131 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) | ||
| 132 | set_cpu_cap(c, X86_FEATURE_11AP); | ||
| 133 | |||
| 134 | |||
| 135 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
| 136 | /* | ||
| 137 | * Set up the preferred alignment for movsl bulk memory moves | ||
| 138 | */ | ||
| 139 | switch (c->x86) { | ||
| 140 | case 4: /* 486: untested */ | ||
| 141 | break; | ||
| 142 | case 5: /* Old Pentia: untested */ | ||
| 143 | break; | ||
| 144 | case 6: /* PII/PIII only like movsl with 8-byte alignment */ | ||
| 145 | movsl_mask.mask = 7; | ||
| 146 | break; | ||
| 147 | case 15: /* P4 is OK down to 8-byte alignment */ | ||
| 148 | movsl_mask.mask = 7; | ||
| 149 | break; | ||
| 150 | } | ||
| 151 | #endif | ||
| 152 | |||
| 153 | #ifdef CONFIG_X86_NUMAQ | ||
| 154 | numaq_tsc_disable(); | ||
| 155 | #endif | ||
| 80 | } | 156 | } |
| 157 | #else | ||
| 158 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | ||
| 159 | { | ||
| 160 | } | ||
| 161 | #endif | ||
| 81 | 162 | ||
| 163 | static void __cpuinit srat_detect_node(void) | ||
| 164 | { | ||
| 165 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | ||
| 166 | unsigned node; | ||
| 167 | int cpu = smp_processor_id(); | ||
| 168 | int apicid = hard_smp_processor_id(); | ||
| 169 | |||
| 170 | /* For now, don't do the funky fallback heuristics the AMD version | ||
| 171 | employs. */ | ||
| 172 | node = apicid_to_node[apicid]; | ||
| 173 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
| 174 | node = first_node(node_online_map); | ||
| 175 | numa_set_node(cpu, node); | ||
| 176 | |||
| 177 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
| 178 | #endif | ||
| 179 | } | ||
| 82 | 180 | ||
| 83 | /* | 181 | /* |
| 84 | * find out the number of processor cores on the die | 182 | * find out the number of processor cores on the die |
| 85 | */ | 183 | */ |
| 86 | static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) | 184 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) |
| 87 | { | 185 | { |
| 88 | unsigned int eax, ebx, ecx, edx; | 186 | unsigned int eax, ebx, ecx, edx; |
| 89 | 187 | ||
| @@ -98,45 +196,51 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) | |||
| 98 | return 1; | 196 | return 1; |
| 99 | } | 197 | } |
| 100 | 198 | ||
| 101 | #ifdef CONFIG_X86_F00F_BUG | 199 | static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) |
| 102 | static void __cpuinit trap_init_f00f_bug(void) | ||
| 103 | { | 200 | { |
| 104 | __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); | 201 | /* Intel VMX MSR indicated features */ |
| 105 | 202 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 | |
| 106 | /* | 203 | #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 |
| 107 | * Update the IDT descriptor and reload the IDT so that | 204 | #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 |
| 108 | * it uses the read-only mapped virtual address. | 205 | #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 |
| 109 | */ | 206 | #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 |
| 110 | idt_descr.address = fix_to_virt(FIX_F00F_IDT); | 207 | #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 |
| 111 | load_idt(&idt_descr); | 208 | |
| 209 | u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; | ||
| 210 | |||
| 211 | clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); | ||
| 212 | clear_cpu_cap(c, X86_FEATURE_VNMI); | ||
| 213 | clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); | ||
| 214 | clear_cpu_cap(c, X86_FEATURE_EPT); | ||
| 215 | clear_cpu_cap(c, X86_FEATURE_VPID); | ||
| 216 | |||
| 217 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); | ||
| 218 | msr_ctl = vmx_msr_high | vmx_msr_low; | ||
| 219 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) | ||
| 220 | set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); | ||
| 221 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) | ||
| 222 | set_cpu_cap(c, X86_FEATURE_VNMI); | ||
| 223 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { | ||
| 224 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | ||
| 225 | vmx_msr_low, vmx_msr_high); | ||
| 226 | msr_ctl2 = vmx_msr_high | vmx_msr_low; | ||
| 227 | if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && | ||
| 228 | (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) | ||
| 229 | set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); | ||
| 230 | if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) | ||
| 231 | set_cpu_cap(c, X86_FEATURE_EPT); | ||
| 232 | if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) | ||
| 233 | set_cpu_cap(c, X86_FEATURE_VPID); | ||
| 234 | } | ||
| 112 | } | 235 | } |
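
detect_vmx_virtcap() folds each capability MSR's two halves together; architecturally, the high dword of IA32_VMX_PROCBASED_CTLS (MSR 0x482) is the "allowed-1" mask, so a set bit there means the control can be enabled. A userspace sketch reading the same MSR through the msr driver (root and the msr module assumed; the bit positions match the X86_VMX_FEATURE_* masks above):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        /* 0x482 = IA32_VMX_PROCBASED_CTLS; offset selects the MSR */
        if (fd < 0 || pread(fd, &val, 8, 0x482) != 8) {
            perror("rdmsr");
            return 1;
        }
        printf("TPR shadow %s, secondary controls %s\n",
               (val >> 32) & (1u << 21) ? "allowed" : "not allowed",
               (val >> 32) & (1u << 31) ? "allowed" : "not allowed");
        close(fd);
        return 0;
    }
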
| 113 | #endif | ||
| 114 | 236 | ||
| 115 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | 237 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
| 116 | { | 238 | { |
| 117 | unsigned int l2 = 0; | 239 | unsigned int l2 = 0; |
| 118 | char *p = NULL; | ||
| 119 | 240 | ||
| 120 | early_init_intel(c); | 241 | early_init_intel(c); |
| 121 | 242 | ||
| 122 | #ifdef CONFIG_X86_F00F_BUG | 243 | intel_workarounds(c); |
| 123 | /* | ||
| 124 | * All current models of Pentium and Pentium with MMX technology CPUs | ||
| 125 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | ||
| 126 | * Note that the workaround only should be initialized once... | ||
| 127 | */ | ||
| 128 | c->f00f_bug = 0; | ||
| 129 | if (!paravirt_enabled() && c->x86 == 5) { | ||
| 130 | static int f00f_workaround_enabled; | ||
| 131 | |||
| 132 | c->f00f_bug = 1; | ||
| 133 | if (!f00f_workaround_enabled) { | ||
| 134 | trap_init_f00f_bug(); | ||
| 135 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | ||
| 136 | f00f_workaround_enabled = 1; | ||
| 137 | } | ||
| 138 | } | ||
| 139 | #endif | ||
| 140 | 244 | ||
| 141 | l2 = init_intel_cacheinfo(c); | 245 | l2 = init_intel_cacheinfo(c); |
| 142 | if (c->cpuid_level > 9) { | 246 | if (c->cpuid_level > 9) { |
| @@ -146,16 +250,32 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
| 146 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | 250 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
| 147 | } | 251 | } |
| 148 | 252 | ||
| 149 | /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ | 253 | if (cpu_has_xmm2) |
| 150 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | 254 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
| 151 | clear_cpu_cap(c, X86_FEATURE_SEP); | 255 | if (cpu_has_ds) { |
| 256 | unsigned int l1; | ||
| 257 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
| 258 | if (!(l1 & (1<<11))) | ||
| 259 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
| 260 | if (!(l1 & (1<<12))) | ||
| 261 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
| 262 | ds_init_intel(c); | ||
| 263 | } | ||
| 152 | 264 | ||
| 265 | #ifdef CONFIG_X86_64 | ||
| 266 | if (c->x86 == 15) | ||
| 267 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
| 268 | if (c->x86 == 6) | ||
| 269 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
| 270 | #else | ||
| 153 | /* | 271 | /* |
| 154 | * Names for the Pentium II/Celeron processors | 272 | * Names for the Pentium II/Celeron processors |
| 155 | * detectable only by also checking the cache size. | 273 | * detectable only by also checking the cache size. |
| 156 | * Dixon is NOT a Celeron. | 274 | * Dixon is NOT a Celeron. |
| 157 | */ | 275 | */ |
| 158 | if (c->x86 == 6) { | 276 | if (c->x86 == 6) { |
| 277 | char *p = NULL; | ||
| 278 | |||
| 159 | switch (c->x86_model) { | 279 | switch (c->x86_model) { |
| 160 | case 5: | 280 | case 5: |
| 161 | if (c->x86_mask == 0) { | 281 | if (c->x86_mask == 0) { |
| @@ -178,71 +298,41 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
| 178 | p = "Celeron (Coppermine)"; | 298 | p = "Celeron (Coppermine)"; |
| 179 | break; | 299 | break; |
| 180 | } | 300 | } |
| 181 | } | ||
| 182 | |||
| 183 | if (p) | ||
| 184 | strcpy(c->x86_model_id, p); | ||
| 185 | |||
| 186 | c->x86_max_cores = num_cpu_cores(c); | ||
| 187 | |||
| 188 | detect_ht(c); | ||
| 189 | 301 | ||
| 190 | /* Work around errata */ | 302 | if (p) |
| 191 | Intel_errata_workarounds(c); | 303 | strcpy(c->x86_model_id, p); |
| 192 | |||
| 193 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
| 194 | /* | ||
| 195 | * Set up the preferred alignment for movsl bulk memory moves | ||
| 196 | */ | ||
| 197 | switch (c->x86) { | ||
| 198 | case 4: /* 486: untested */ | ||
| 199 | break; | ||
| 200 | case 5: /* Old Pentia: untested */ | ||
| 201 | break; | ||
| 202 | case 6: /* PII/PIII only like movsl with 8-byte alignment */ | ||
| 203 | movsl_mask.mask = 7; | ||
| 204 | break; | ||
| 205 | case 15: /* P4 is OK down to 8-byte alignment */ | ||
| 206 | movsl_mask.mask = 7; | ||
| 207 | break; | ||
| 208 | } | 304 | } |
| 209 | #endif | ||
| 210 | 305 | ||
| 211 | if (cpu_has_xmm2) | 306 | if (c->x86 == 15) |
| 212 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
| 213 | if (c->x86 == 15) { | ||
| 214 | set_cpu_cap(c, X86_FEATURE_P4); | 307 | set_cpu_cap(c, X86_FEATURE_P4); |
| 215 | } | ||
| 216 | if (c->x86 == 6) | 308 | if (c->x86 == 6) |
| 217 | set_cpu_cap(c, X86_FEATURE_P3); | 309 | set_cpu_cap(c, X86_FEATURE_P3); |
| 218 | if (cpu_has_ds) { | ||
| 219 | unsigned int l1; | ||
| 220 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
| 221 | if (!(l1 & (1<<11))) | ||
| 222 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
| 223 | if (!(l1 & (1<<12))) | ||
| 224 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
| 225 | ds_init_intel(c); | ||
| 226 | } | ||
| 227 | 310 | ||
| 228 | if (cpu_has_bts) | 311 | if (cpu_has_bts) |
| 229 | ptrace_bts_init_intel(c); | 312 | ptrace_bts_init_intel(c); |
| 230 | 313 | ||
| 231 | /* | 314 | #endif |
| 232 | * See if we have a good local APIC by checking for buggy Pentia, | ||
| 233 | * i.e. all B steppings and the C2 stepping of P54C when using their | ||
| 234 | * integrated APIC (see 11AP erratum in "Pentium Processor | ||
| 235 | * Specification Update"). | ||
| 236 | */ | ||
| 237 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && | ||
| 238 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) | ||
| 239 | set_cpu_cap(c, X86_FEATURE_11AP); | ||
| 240 | 315 | ||
| 241 | #ifdef CONFIG_X86_NUMAQ | 316 | detect_extended_topology(c); |
| 242 | numaq_tsc_disable(); | 317 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { |
| 318 | /* | ||
| 319 | * Let's use the legacy cpuid leaves 0x1 and 0x4 for topology | ||
| 320 | * detection. | ||
| 321 | */ | ||
| 322 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
| 323 | #ifdef CONFIG_X86_32 | ||
| 324 | detect_ht(c); | ||
| 243 | #endif | 325 | #endif |
| 326 | } | ||
| 327 | |||
| 328 | /* Detect the NUMA node via SRAT */ | ||
| 329 | srat_detect_node(); | ||
| 330 | |||
| 331 | if (cpu_has(c, X86_FEATURE_VMX)) | ||
| 332 | detect_vmx_virtcap(c); | ||
| 244 | } | 333 | } |
| 245 | 334 | ||
| 335 | #ifdef CONFIG_X86_32 | ||
| 246 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 336 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
| 247 | { | 337 | { |
| 248 | /* | 338 | /* |
| @@ -255,10 +345,12 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i | |||
| 255 | size = 256; | 345 | size = 256; |
| 256 | return size; | 346 | return size; |
| 257 | } | 347 | } |
| 348 | #endif | ||
| 258 | 349 | ||
| 259 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | 350 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { |
| 260 | .c_vendor = "Intel", | 351 | .c_vendor = "Intel", |
| 261 | .c_ident = { "GenuineIntel" }, | 352 | .c_ident = { "GenuineIntel" }, |
| 353 | #ifdef CONFIG_X86_32 | ||
| 262 | .c_models = { | 354 | .c_models = { |
| 263 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = | 355 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = |
| 264 | { | 356 | { |
| @@ -308,76 +400,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
| 308 | } | 400 | } |
| 309 | }, | 401 | }, |
| 310 | }, | 402 | }, |
| 403 | .c_size_cache = intel_size_cache, | ||
| 404 | #endif | ||
| 311 | .c_early_init = early_init_intel, | 405 | .c_early_init = early_init_intel, |
| 312 | .c_init = init_intel, | 406 | .c_init = init_intel, |
| 313 | .c_size_cache = intel_size_cache, | 407 | .c_x86_vendor = X86_VENDOR_INTEL, |
| 314 | }; | 408 | }; |
| 315 | 409 | ||
| 316 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | 410 | cpu_dev_register(intel_cpu_dev); |
| 317 | |||
| 318 | #ifndef CONFIG_X86_CMPXCHG | ||
| 319 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
| 320 | { | ||
| 321 | u8 prev; | ||
| 322 | unsigned long flags; | ||
| 323 | |||
| 324 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
| 325 | local_irq_save(flags); | ||
| 326 | prev = *(u8 *)ptr; | ||
| 327 | if (prev == old) | ||
| 328 | *(u8 *)ptr = new; | ||
| 329 | local_irq_restore(flags); | ||
| 330 | return prev; | ||
| 331 | } | ||
| 332 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
| 333 | |||
| 334 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
| 335 | { | ||
| 336 | u16 prev; | ||
| 337 | unsigned long flags; | ||
| 338 | |||
| 339 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
| 340 | local_irq_save(flags); | ||
| 341 | prev = *(u16 *)ptr; | ||
| 342 | if (prev == old) | ||
| 343 | *(u16 *)ptr = new; | ||
| 344 | local_irq_restore(flags); | ||
| 345 | return prev; | ||
| 346 | } | ||
| 347 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
| 348 | |||
| 349 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
| 350 | { | ||
| 351 | u32 prev; | ||
| 352 | unsigned long flags; | ||
| 353 | |||
| 354 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
| 355 | local_irq_save(flags); | ||
| 356 | prev = *(u32 *)ptr; | ||
| 357 | if (prev == old) | ||
| 358 | *(u32 *)ptr = new; | ||
| 359 | local_irq_restore(flags); | ||
| 360 | return prev; | ||
| 361 | } | ||
| 362 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
| 363 | #endif | ||
| 364 | |||
| 365 | #ifndef CONFIG_X86_CMPXCHG64 | ||
| 366 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
| 367 | { | ||
| 368 | u64 prev; | ||
| 369 | unsigned long flags; | ||
| 370 | |||
| 371 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
| 372 | local_irq_save(flags); | ||
| 373 | prev = *(u64 *)ptr; | ||
| 374 | if (prev == old) | ||
| 375 | *(u64 *)ptr = new; | ||
| 376 | local_irq_restore(flags); | ||
| 377 | return prev; | ||
| 378 | } | ||
| 379 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
| 380 | #endif | ||
| 381 | |||
| 382 | /* arch_initcall(intel_cpu_init); */ | ||
| 383 | 411 | ||
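The MSR_IA32_MISC_ENABLE tests in init_intel() above treat bits 11 and 12 as "BTS unavailable" and "PEBS unavailable". As a hedged illustration only, this user-space sketch reads the same MSR through the msr device node; the 0x1a0 register number and the /dev/cpu path are assumptions matching the msr-tools convention, and it needs root plus the msr module loaded:

```c
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t misc;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* MSR_IA32_MISC_ENABLE is 0x1a0 (assumption per Intel SDM). */
	if (fd < 0 || pread(fd, &misc, sizeof(misc), 0x1a0) != sizeof(misc)) {
		perror("msr read");
		return 1;
	}
	/* Same polarity as init_intel(): a set bit means "unavailable". */
	printf("BTS  %savailable\n", (misc & (1ULL << 11)) ? "un" : "");
	printf("PEBS %savailable\n", (misc & (1ULL << 12)) ? "un" : "");
	close(fd);
	return 0;
}
```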
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c deleted file mode 100644 index 1019c58d39f0..000000000000 --- a/arch/x86/kernel/cpu/intel_64.c +++ /dev/null | |||
| @@ -1,95 +0,0 @@ | |||
| 1 | #include <linux/init.h> | ||
| 2 | #include <linux/smp.h> | ||
| 3 | #include <asm/processor.h> | ||
| 4 | #include <asm/ptrace.h> | ||
| 5 | #include <asm/topology.h> | ||
| 6 | #include <asm/numa_64.h> | ||
| 7 | |||
| 8 | #include "cpu.h" | ||
| 9 | |||
| 10 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | ||
| 11 | { | ||
| 12 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | ||
| 13 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | ||
| 14 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
| 15 | |||
| 16 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); | ||
| 17 | } | ||
| 18 | |||
| 19 | /* | ||
| 20 | * find out the number of processor cores on the die | ||
| 21 | */ | ||
| 22 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | ||
| 23 | { | ||
| 24 | unsigned int eax, t; | ||
| 25 | |||
| 26 | if (c->cpuid_level < 4) | ||
| 27 | return 1; | ||
| 28 | |||
| 29 | cpuid_count(4, 0, &eax, &t, &t, &t); | ||
| 30 | |||
| 31 | if (eax & 0x1f) | ||
| 32 | return ((eax >> 26) + 1); | ||
| 33 | else | ||
| 34 | return 1; | ||
| 35 | } | ||
| 36 | |||
| 37 | static void __cpuinit srat_detect_node(void) | ||
| 38 | { | ||
| 39 | #ifdef CONFIG_NUMA | ||
| 40 | unsigned node; | ||
| 41 | int cpu = smp_processor_id(); | ||
| 42 | int apicid = hard_smp_processor_id(); | ||
| 43 | |||
| 44 | /* Don't do the funky fallback heuristics the AMD version employs | ||
| 45 | for now. */ | ||
| 46 | node = apicid_to_node[apicid]; | ||
| 47 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
| 48 | node = first_node(node_online_map); | ||
| 49 | numa_set_node(cpu, node); | ||
| 50 | |||
| 51 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
| 52 | #endif | ||
| 53 | } | ||
| 54 | |||
| 55 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | ||
| 56 | { | ||
| 57 | init_intel_cacheinfo(c); | ||
| 58 | if (c->cpuid_level > 9) { | ||
| 59 | unsigned eax = cpuid_eax(10); | ||
| 60 | /* Check for version and the number of counters */ | ||
| 61 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | ||
| 62 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | ||
| 63 | } | ||
| 64 | |||
| 65 | if (cpu_has_ds) { | ||
| 66 | unsigned int l1, l2; | ||
| 67 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
| 68 | if (!(l1 & (1<<11))) | ||
| 69 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
| 70 | if (!(l1 & (1<<12))) | ||
| 71 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
| 72 | } | ||
| 73 | |||
| 74 | |||
| 75 | if (cpu_has_bts) | ||
| 76 | ds_init_intel(c); | ||
| 77 | |||
| 78 | if (c->x86 == 15) | ||
| 79 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
| 80 | if (c->x86 == 6) | ||
| 81 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
| 82 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
| 83 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
| 84 | |||
| 85 | srat_detect_node(); | ||
| 86 | } | ||
| 87 | |||
| 88 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | ||
| 89 | .c_vendor = "Intel", | ||
| 90 | .c_ident = { "GenuineIntel" }, | ||
| 91 | .c_early_init = early_init_intel, | ||
| 92 | .c_init = init_intel, | ||
| 93 | }; | ||
| 94 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | ||
| 95 | |||
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6b0a10b002f1..3f46afbb1cf1 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Routines to identify caches on Intel CPU. | 2 | * Routines to identify caches on Intel CPU. |
| 3 | * | 3 | * |
| 4 | * Changes: | 4 | * Changes: |
| 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
| 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
| 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
| 8 | */ | 8 | */ |
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
| 14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
| 15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
| 16 | #include <linux/pci.h> | ||
| 16 | 17 | ||
| 17 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
| 18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
| @@ -130,9 +131,18 @@ struct _cpuid4_info { | |||
| 130 | union _cpuid4_leaf_ebx ebx; | 131 | union _cpuid4_leaf_ebx ebx; |
| 131 | union _cpuid4_leaf_ecx ecx; | 132 | union _cpuid4_leaf_ecx ecx; |
| 132 | unsigned long size; | 133 | unsigned long size; |
| 134 | unsigned long can_disable; | ||
| 133 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ | 135 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ |
| 134 | }; | 136 | }; |
| 135 | 137 | ||
| 138 | #ifdef CONFIG_PCI | ||
| 139 | static struct pci_device_id k8_nb_id[] = { | ||
| 140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | ||
| 141 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | ||
| 142 | {} | ||
| 143 | }; | ||
| 144 | #endif | ||
| 145 | |||
| 136 | unsigned short num_cache_leaves; | 146 | unsigned short num_cache_leaves; |
| 137 | 147 | ||
| 138 | /* AMD doesn't have CPUID4. Emulate it here to report the same | 148 | /* AMD doesn't have CPUID4. Emulate it here to report the same |
| @@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = { | |||
| 182 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; | 192 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; |
| 183 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; | 193 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; |
| 184 | 194 | ||
| 185 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 195 | static void __cpuinit |
| 186 | union _cpuid4_leaf_ebx *ebx, | 196 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
| 187 | union _cpuid4_leaf_ecx *ecx) | 197 | union _cpuid4_leaf_ebx *ebx, |
| 198 | union _cpuid4_leaf_ecx *ecx) | ||
| 188 | { | 199 | { |
| 189 | unsigned dummy; | 200 | unsigned dummy; |
| 190 | unsigned line_size, lines_per_tag, assoc, size_in_kb; | 201 | unsigned line_size, lines_per_tag, assoc, size_in_kb; |
| @@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
| 251 | (ebx->split.ways_of_associativity + 1) - 1; | 262 | (ebx->split.ways_of_associativity + 1) - 1; |
| 252 | } | 263 | } |
| 253 | 264 | ||
| 254 | static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 265 | static void __cpuinit |
| 266 | amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | ||
| 267 | { | ||
| 268 | if (index < 3) | ||
| 269 | return; | ||
| 270 | this_leaf->can_disable = 1; | ||
| 271 | } | ||
| 272 | |||
| 273 | static int | ||
| 274 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
| 255 | { | 275 | { |
| 256 | union _cpuid4_leaf_eax eax; | 276 | union _cpuid4_leaf_eax eax; |
| 257 | union _cpuid4_leaf_ebx ebx; | 277 | union _cpuid4_leaf_ebx ebx; |
| 258 | union _cpuid4_leaf_ecx ecx; | 278 | union _cpuid4_leaf_ecx ecx; |
| 259 | unsigned edx; | 279 | unsigned edx; |
| 260 | 280 | ||
| 261 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | 281 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
| 262 | amd_cpuid4(index, &eax, &ebx, &ecx); | 282 | amd_cpuid4(index, &eax, &ebx, &ecx); |
| 263 | else | 283 | if (boot_cpu_data.x86 >= 0x10) |
| 264 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | 284 | amd_check_l3_disable(index, this_leaf); |
| 285 | } else { | ||
| 286 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | ||
| 287 | } | ||
| 288 | |||
| 265 | if (eax.split.type == CACHE_TYPE_NULL) | 289 | if (eax.split.type == CACHE_TYPE_NULL) |
| 266 | return -EIO; /* better error ? */ | 290 | return -EIO; /* better error ? */ |
| 267 | 291 | ||
| 268 | this_leaf->eax = eax; | 292 | this_leaf->eax = eax; |
| 269 | this_leaf->ebx = ebx; | 293 | this_leaf->ebx = ebx; |
| 270 | this_leaf->ecx = ecx; | 294 | this_leaf->ecx = ecx; |
| 271 | this_leaf->size = (ecx.split.number_of_sets + 1) * | 295 | this_leaf->size = (ecx.split.number_of_sets + 1) * |
| 272 | (ebx.split.coherency_line_size + 1) * | 296 | (ebx.split.coherency_line_size + 1) * |
| 273 | (ebx.split.physical_line_partition + 1) * | 297 | (ebx.split.physical_line_partition + 1) * |
| 274 | (ebx.split.ways_of_associativity + 1); | 298 | (ebx.split.ways_of_associativity + 1); |
| 275 | return 0; | 299 | return 0; |
| 276 | } | 300 | } |
| 277 | 301 | ||
| @@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 453 | 477 | ||
| 454 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 478 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
| 455 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 479 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
| 456 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 480 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
| 457 | 481 | ||
| 458 | #ifdef CONFIG_SMP | 482 | #ifdef CONFIG_SMP |
| 459 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 483 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
| @@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
| 490 | 514 | ||
| 491 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 515 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
| 492 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { | 516 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { |
| 493 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 517 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
| 494 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 518 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); |
| 495 | } | 519 | } |
| 496 | } | 520 | } |
| @@ -572,7 +596,7 @@ struct _index_kobject { | |||
| 572 | 596 | ||
| 573 | /* pointer to array of kobjects for cpuX/cache/indexY */ | 597 | /* pointer to array of kobjects for cpuX/cache/indexY */ |
| 574 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | 598 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); |
| 575 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) | 599 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) |
| 576 | 600 | ||
| 577 | #define show_one_plus(file_name, object, val) \ | 601 | #define show_one_plus(file_name, object, val) \ |
| 578 | static ssize_t show_##file_name \ | 602 | static ssize_t show_##file_name \ |
| @@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) { | |||
| 637 | } | 661 | } |
| 638 | } | 662 | } |
| 639 | 663 | ||
| 664 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
| 665 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
| 666 | |||
| 667 | #ifdef CONFIG_PCI | ||
| 668 | static struct pci_dev *get_k8_northbridge(int node) | ||
| 669 | { | ||
| 670 | struct pci_dev *dev = NULL; | ||
| 671 | int i; | ||
| 672 | |||
| 673 | for (i = 0; i <= node; i++) { | ||
| 674 | do { | ||
| 675 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
| 676 | if (!dev) | ||
| 677 | break; | ||
| 678 | } while (!pci_match_id(&k8_nb_id[0], dev)); | ||
| 679 | if (!dev) | ||
| 680 | break; | ||
| 681 | } | ||
| 682 | return dev; | ||
| 683 | } | ||
| 684 | #else | ||
| 685 | static struct pci_dev *get_k8_northbridge(int node) | ||
| 686 | { | ||
| 687 | return NULL; | ||
| 688 | } | ||
| 689 | #endif | ||
| 690 | |||
| 691 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) | ||
| 692 | { | ||
| 693 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
| 694 | struct pci_dev *dev = NULL; | ||
| 695 | ssize_t ret = 0; | ||
| 696 | int i; | ||
| 697 | |||
| 698 | if (!this_leaf->can_disable) | ||
| 699 | return sprintf(buf, "Feature not enabled\n"); | ||
| 700 | |||
| 701 | dev = get_k8_northbridge(node); | ||
| 702 | if (!dev) { | ||
| 703 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
| 704 | return -EINVAL; | ||
| 705 | } | ||
| 706 | |||
| 707 | for (i = 0; i < 2; i++) { | ||
| 708 | unsigned int reg; | ||
| 709 | |||
| 710 | pci_read_config_dword(dev, 0x1BC + i * 4, ®); | ||
| 711 | |||
| 712 | ret += sprintf(buf + ret, "Entry: %d\n", i); | ||
| 713 | ret += sprintf(buf + ret, | ||
| 714 | "Reads: %s\tNew Entries: %s\n", | ||
| 715 | reg & 0x80000000 ? "Disabled" : "Allowed", | ||
| 716 | reg & 0x40000000 ? "Disabled" : "Allowed"); | ||
| 717 | ret += sprintf(buf + ret, "SubCache: %x\tIndex: %x\n", | ||
| 718 | (reg & 0x30000) >> 16, reg & 0xfff); | ||
| 719 | } | ||
| 720 | return ret; | ||
| 721 | } | ||
| 722 | |||
| 723 | static ssize_t | ||
| 724 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, | ||
| 725 | size_t count) | ||
| 726 | { | ||
| 727 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
| 728 | struct pci_dev *dev = NULL; | ||
| 729 | unsigned int ret, index, val; | ||
| 730 | |||
| 731 | if (!this_leaf->can_disable) | ||
| 732 | return 0; | ||
| 733 | |||
| 734 | if (strlen(buf) > 15) | ||
| 735 | return -EINVAL; | ||
| 736 | |||
| 737 | ret = sscanf(buf, "%x %x", &index, &val); | ||
| 738 | if (ret != 2) | ||
| 739 | return -EINVAL; | ||
| 740 | if (index > 1) | ||
| 741 | return -EINVAL; | ||
| 742 | |||
| 743 | val |= 0xc0000000; | ||
| 744 | dev = get_k8_northbridge(node); | ||
| 745 | if (!dev) { | ||
| 746 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
| 747 | return -EINVAL; | ||
| 748 | } | ||
| 749 | |||
| 750 | pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000); | ||
| 751 | wbinvd(); | ||
| 752 | pci_write_config_dword(dev, 0x1BC + index * 4, val); | ||
| 753 | |||
| 754 | return 1; | ||
| 755 | } | ||
| 756 | |||
| 640 | struct _cache_attr { | 757 | struct _cache_attr { |
| 641 | struct attribute attr; | 758 | struct attribute attr; |
| 642 | ssize_t (*show)(struct _cpuid4_info *, char *); | 759 | ssize_t (*show)(struct _cpuid4_info *, char *); |
| @@ -657,6 +774,8 @@ define_one_ro(size); | |||
| 657 | define_one_ro(shared_cpu_map); | 774 | define_one_ro(shared_cpu_map); |
| 658 | define_one_ro(shared_cpu_list); | 775 | define_one_ro(shared_cpu_list); |
| 659 | 776 | ||
| 777 | static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable); | ||
| 778 | |||
| 660 | static struct attribute * default_attrs[] = { | 779 | static struct attribute * default_attrs[] = { |
| 661 | &type.attr, | 780 | &type.attr, |
| 662 | &level.attr, | 781 | &level.attr, |
| @@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = { | |||
| 667 | &size.attr, | 786 | &size.attr, |
| 668 | &shared_cpu_map.attr, | 787 | &shared_cpu_map.attr, |
| 669 | &shared_cpu_list.attr, | 788 | &shared_cpu_list.attr, |
| 789 | &cache_disable.attr, | ||
| 670 | NULL | 790 | NULL |
| 671 | }; | 791 | }; |
| 672 | 792 | ||
| 673 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
| 674 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
| 675 | |||
| 676 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 793 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) |
| 677 | { | 794 | { |
| 678 | struct _cache_attr *fattr = to_attr(attr); | 795 | struct _cache_attr *fattr = to_attr(attr); |
| @@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
| 682 | ret = fattr->show ? | 799 | ret = fattr->show ? |
| 683 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 800 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
| 684 | buf) : | 801 | buf) : |
| 685 | 0; | 802 | 0; |
| 686 | return ret; | 803 | return ret; |
| 687 | } | 804 | } |
| 688 | 805 | ||
| 689 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 806 | static ssize_t store(struct kobject * kobj, struct attribute * attr, |
| 690 | const char * buf, size_t count) | 807 | const char * buf, size_t count) |
| 691 | { | 808 | { |
| 692 | return 0; | 809 | struct _cache_attr *fattr = to_attr(attr); |
| 810 | struct _index_kobject *this_leaf = to_object(kobj); | ||
| 811 | ssize_t ret; | ||
| 812 | |||
| 813 | ret = fattr->store ? | ||
| 814 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | ||
| 815 | buf, count) : | ||
| 816 | 0; | ||
| 817 | return ret; | ||
| 693 | } | 818 | } |
| 694 | 819 | ||
| 695 | static struct sysfs_ops sysfs_ops = { | 820 | static struct sysfs_ops sysfs_ops = { |
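To see the new attribute in action, a hedged user-space sketch follows; the sysfs path is an assumption (index3 corresponding to the L3, per amd_check_l3_disable()), and the "index value" write format follows the sscanf() in store_cache_disable():

```c
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))	/* show_cache_disable() text */
		fputs(line, stdout);
	fclose(f);

	/* store_cache_disable() parses "%x %x": entry index (0 or 1) and
	 * the raw value; the kernel ORs in 0xc0000000 itself. */
	f = fopen(path, "w");
	if (!f || fprintf(f, "0 800\n") < 0)
		perror("write");
	if (f)
		fclose(f);
	return 0;
}
```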
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 726a5fcdf341..4b031a4ac856 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
| @@ -860,7 +860,7 @@ error: | |||
| 860 | return err; | 860 | return err; |
| 861 | } | 861 | } |
| 862 | 862 | ||
| 863 | static void mce_remove_device(unsigned int cpu) | 863 | static __cpuinit void mce_remove_device(unsigned int cpu) |
| 864 | { | 864 | { |
| 865 | int i; | 865 | int i; |
| 866 | 866 | ||
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl new file mode 100644 index 000000000000..dfea390e1608 --- /dev/null +++ b/arch/x86/kernel/cpu/mkcapflags.pl | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | #!/usr/bin/perl | ||
| 2 | # | ||
| 3 | # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h | ||
| 4 | # | ||
| 5 | |||
| 6 | ($in, $out) = @ARGV; | ||
| 7 | |||
| 8 | open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n"; | ||
| 9 | open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n"; | ||
| 10 | |||
| 11 | print OUT "#include <asm/cpufeature.h>\n\n"; | ||
| 12 | print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n"; | ||
| 13 | |||
| 14 | while (defined($line = <IN>)) { | ||
| 15 | if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) { | ||
| 16 | $macro = $1; | ||
| 17 | $feature = $2; | ||
| 18 | $tail = $3; | ||
| 19 | if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) { | ||
| 20 | $feature = $1; | ||
| 21 | } | ||
| 22 | |||
| 23 | if ($feature ne '') { | ||
| 24 | printf OUT "\t%-32s = \"%s\",\n", | ||
| 25 | "[$macro]", "\L$feature"; | ||
| 26 | } | ||
| 27 | } | ||
| 28 | } | ||
| 29 | print OUT "};\n"; | ||
| 30 | |||
| 31 | close(IN); | ||
| 32 | close(OUT); | ||
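For orientation, an illustrative sketch of the kind of C file this script generates; the exact entries depend on the cpufeature.h it is run against:

```c
/* Illustrative excerpt of mkcapflags.pl output, not verbatim. */
#include <asm/cpufeature.h>

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU]		 = "fpu",
	[X86_FEATURE_VME]		 = "vme",
	[X86_FEATURE_DE]		 = "de",
	/* ... one designated initializer per #define in cpufeature.h ... */
};
```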
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c new file mode 100644 index 000000000000..5abbea297e0c --- /dev/null +++ b/arch/x86/kernel/cpu/powerflags.c | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* | ||
| 2 | * Strings for the various x86 power flags | ||
| 3 | * | ||
| 4 | * This file must not contain any executable code. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <asm/cpufeature.h> | ||
| 8 | |||
| 9 | const char *const x86_power_flags[32] = { | ||
| 10 | "ts", /* temperature sensor */ | ||
| 11 | "fid", /* frequency id control */ | ||
| 12 | "vid", /* voltage id control */ | ||
| 13 | "ttp", /* thermal trip */ | ||
| 14 | "tm", | ||
| 15 | "stc", | ||
| 16 | "100mhzsteps", | ||
| 17 | "hwpstate", | ||
| 18 | "", /* tsc invariant mapped to constant_tsc */ | ||
| 19 | /* nothing */ | ||
| 20 | }; | ||
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index b911a2c61b8f..52b3fefbd5af 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
| @@ -5,6 +5,18 @@ | |||
| 5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
| 6 | #include "cpu.h" | 6 | #include "cpu.h" |
| 7 | 7 | ||
| 8 | static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) | ||
| 9 | { | ||
| 10 | u32 xlvl; | ||
| 11 | |||
| 12 | /* Transmeta-defined flags: level 0x80860001 */ | ||
| 13 | xlvl = cpuid_eax(0x80860000); | ||
| 14 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
| 15 | if (xlvl >= 0x80860001) | ||
| 16 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
| 17 | } | ||
| 18 | } | ||
| 19 | |||
| 8 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | 20 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) |
| 9 | { | 21 | { |
| 10 | unsigned int cap_mask, uk, max, dummy; | 22 | unsigned int cap_mask, uk, max, dummy; |
| @@ -12,7 +24,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
| 12 | unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev; | 24 | unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev; |
| 13 | char cpu_info[65]; | 25 | char cpu_info[65]; |
| 14 | 26 | ||
| 15 | get_model_name(c); /* Same as AMD/Cyrix */ | 27 | early_init_transmeta(c); |
| 28 | |||
| 16 | display_cacheinfo(c); | 29 | display_cacheinfo(c); |
| 17 | 30 | ||
| 18 | /* Print CMS and CPU revision */ | 31 | /* Print CMS and CPU revision */ |
| @@ -85,23 +98,12 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
| 85 | #endif | 98 | #endif |
| 86 | } | 99 | } |
| 87 | 100 | ||
| 88 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c) | ||
| 89 | { | ||
| 90 | u32 xlvl; | ||
| 91 | |||
| 92 | /* Transmeta-defined flags: level 0x80860001 */ | ||
| 93 | xlvl = cpuid_eax(0x80860000); | ||
| 94 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
| 95 | if (xlvl >= 0x80860001) | ||
| 96 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { | 101 | static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { |
| 101 | .c_vendor = "Transmeta", | 102 | .c_vendor = "Transmeta", |
| 102 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
| 104 | .c_early_init = early_init_transmeta, | ||
| 103 | .c_init = init_transmeta, | 105 | .c_init = init_transmeta, |
| 104 | .c_identify = transmeta_identify, | 106 | .c_x86_vendor = X86_VENDOR_TRANSMETA, |
| 105 | }; | 107 | }; |
| 106 | 108 | ||
| 107 | cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); | 109 | cpu_dev_register(transmeta_cpu_dev); |
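The vendor-level probe moved into early_init_transmeta() can be mirrored from user space. A hedged sketch using GCC inline asm (x86-only; the leaf numbers come straight from the hunk above):

```c
#include <stdio.h>
#include <stdint.h>

static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
		  uint32_t *c, uint32_t *d)
{
	__asm__ volatile("cpuid"
			 : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
			 : "a"(leaf), "c"(0));
}

int main(void)
{
	uint32_t a, b, c, d;

	/* Transmeta-defined flags: level 0x80860001, gated on 0x80860000. */
	cpuid(0x80860000, &a, &b, &c, &d);
	if ((a & 0xffff0000) == 0x80860000 && a >= 0x80860001) {
		cpuid(0x80860001, &a, &b, &c, &d);
		printf("Transmeta feature flags: %08x\n", d);
	} else {
		printf("No Transmeta-defined CPUID range\n");
	}
	return 0;
}
```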
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index b1fc90989d75..e777f79e0960 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
| @@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = { | |||
| 19 | } | 19 | } |
| 20 | }, | 20 | }, |
| 21 | }, | 21 | }, |
| 22 | .c_x86_vendor = X86_VENDOR_UMC, | ||
| 22 | }; | 23 | }; |
| 23 | 24 | ||
| 24 | cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); | 25 | cpu_dev_register(umc_cpu_dev); |
| 25 | 26 | ||
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 66e48aa2dd1b..78e642feac30 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
| @@ -148,6 +148,9 @@ void __init e820_print_map(char *who) | |||
| 148 | case E820_NVS: | 148 | case E820_NVS: |
| 149 | printk(KERN_CONT "(ACPI NVS)\n"); | 149 | printk(KERN_CONT "(ACPI NVS)\n"); |
| 150 | break; | 150 | break; |
| 151 | case E820_UNUSABLE: | ||
| 152 | printk("(unusable)\n"); | ||
| 153 | break; | ||
| 151 | default: | 154 | default: |
| 152 | printk(KERN_CONT "type %u\n", e820.map[i].type); | 155 | printk(KERN_CONT "type %u\n", e820.map[i].type); |
| 153 | break; | 156 | break; |
| @@ -1260,6 +1263,7 @@ static inline const char *e820_type_to_string(int e820_type) | |||
| 1260 | case E820_RAM: return "System RAM"; | 1263 | case E820_RAM: return "System RAM"; |
| 1261 | case E820_ACPI: return "ACPI Tables"; | 1264 | case E820_ACPI: return "ACPI Tables"; |
| 1262 | case E820_NVS: return "ACPI Non-volatile Storage"; | 1265 | case E820_NVS: return "ACPI Non-volatile Storage"; |
| 1266 | case E820_UNUSABLE: return "Unusable memory"; | ||
| 1263 | default: return "reserved"; | 1267 | default: return "reserved"; |
| 1264 | } | 1268 | } |
| 1265 | } | 1269 | } |
| @@ -1267,6 +1271,7 @@ static inline const char *e820_type_to_string(int e820_type) | |||
| 1267 | /* | 1271 | /* |
| 1268 | * Mark e820 reserved areas as busy for the resource manager. | 1272 | * Mark e820 reserved areas as busy for the resource manager. |
| 1269 | */ | 1273 | */ |
| 1274 | static struct resource __initdata *e820_res; | ||
| 1270 | void __init e820_reserve_resources(void) | 1275 | void __init e820_reserve_resources(void) |
| 1271 | { | 1276 | { |
| 1272 | int i; | 1277 | int i; |
| @@ -1274,6 +1279,7 @@ void __init e820_reserve_resources(void) | |||
| 1274 | u64 end; | 1279 | u64 end; |
| 1275 | 1280 | ||
| 1276 | res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); | 1281 | res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map); |
| 1282 | e820_res = res; | ||
| 1277 | for (i = 0; i < e820.nr_map; i++) { | 1283 | for (i = 0; i < e820.nr_map; i++) { |
| 1278 | end = e820.map[i].addr + e820.map[i].size - 1; | 1284 | end = e820.map[i].addr + e820.map[i].size - 1; |
| 1279 | #ifndef CONFIG_RESOURCES_64BIT | 1285 | #ifndef CONFIG_RESOURCES_64BIT |
| @@ -1287,7 +1293,14 @@ void __init e820_reserve_resources(void) | |||
| 1287 | res->end = end; | 1293 | res->end = end; |
| 1288 | 1294 | ||
| 1289 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 1295 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
| 1290 | insert_resource(&iomem_resource, res); | 1296 | |
| 1297 | /* | ||
| 1298 | * Don't register regions that could conflict with PCI device | ||
| 1299 | * BAR resources; those are inserted later, in | ||
| 1300 | * pcibios_resource_survey() | ||
| 1301 | */ | ||
| 1302 | if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) | ||
| 1303 | insert_resource(&iomem_resource, res); | ||
| 1291 | res++; | 1304 | res++; |
| 1292 | } | 1305 | } |
| 1293 | 1306 | ||
| @@ -1299,6 +1312,19 @@ void __init e820_reserve_resources(void) | |||
| 1299 | } | 1312 | } |
| 1300 | } | 1313 | } |
| 1301 | 1314 | ||
| 1315 | void __init e820_reserve_resources_late(void) | ||
| 1316 | { | ||
| 1317 | int i; | ||
| 1318 | struct resource *res; | ||
| 1319 | |||
| 1320 | res = e820_res; | ||
| 1321 | for (i = 0; i < e820.nr_map; i++) { | ||
| 1322 | if (!res->parent && res->end) | ||
| 1323 | reserve_region_with_split(&iomem_resource, res->start, res->end, res->name); | ||
| 1324 | res++; | ||
| 1325 | } | ||
| 1326 | } | ||
| 1327 | |||
| 1302 | char *__init default_machine_specific_memory_setup(void) | 1328 | char *__init default_machine_specific_memory_setup(void) |
| 1303 | { | 1329 | { |
| 1304 | char *who = "BIOS-e820"; | 1330 | char *who = "BIOS-e820"; |
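The E820_UNUSABLE addition extends the type-to-string mapping in two places. A standalone sketch of that mapping, with the numeric type codes assumed to match asm/e820.h:

```c
#include <stdio.h>

/* Assumed e820 type codes, per the conventional BIOS values. */
enum { E820_RAM = 1, E820_RESERVED = 2, E820_ACPI = 3,
       E820_NVS = 4, E820_UNUSABLE = 5 };

static const char *e820_type_to_string(int type)
{
	switch (type) {
	case E820_RAM:      return "System RAM";
	case E820_ACPI:     return "ACPI Tables";
	case E820_NVS:      return "ACPI Non-volatile Storage";
	case E820_UNUSABLE: return "Unusable memory";
	default:            return "reserved";
	}
}

int main(void)
{
	for (int t = 1; t <= 6; t++)
		printf("type %d -> %s\n", t, e820_type_to_string(t));
	return 0;
}
```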
diff --git a/arch/x86/es7000/es7000plat.c b/arch/x86/kernel/es7000_32.c index 7789fde13c3f..849e5cd485b8 100644 --- a/arch/x86/es7000/es7000plat.c +++ b/arch/x86/kernel/es7000_32.c | |||
| @@ -39,10 +39,93 @@ | |||
| 39 | #include <asm/nmi.h> | 39 | #include <asm/nmi.h> |
| 40 | #include <asm/smp.h> | 40 | #include <asm/smp.h> |
| 41 | #include <asm/apicdef.h> | 41 | #include <asm/apicdef.h> |
| 42 | #include "es7000.h" | ||
| 43 | #include <mach_mpparse.h> | 42 | #include <mach_mpparse.h> |
| 44 | 43 | ||
| 45 | /* | 44 | /* |
| 45 | * ES7000 chipsets | ||
| 46 | */ | ||
| 47 | |||
| 48 | #define NON_UNISYS 0 | ||
| 49 | #define ES7000_CLASSIC 1 | ||
| 50 | #define ES7000_ZORRO 2 | ||
| 51 | |||
| 52 | |||
| 53 | #define MIP_REG 1 | ||
| 54 | #define MIP_PSAI_REG 4 | ||
| 55 | |||
| 56 | #define MIP_BUSY 1 | ||
| 57 | #define MIP_SPIN 0xf0000 | ||
| 58 | #define MIP_VALID 0x0100000000000000ULL | ||
| 59 | #define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff) | ||
| 60 | |||
| 61 | #define MIP_RD_LO(VALUE) (VALUE & 0xffffffff) | ||
| 62 | |||
| 63 | struct mip_reg_info { | ||
| 64 | unsigned long long mip_info; | ||
| 65 | unsigned long long delivery_info; | ||
| 66 | unsigned long long host_reg; | ||
| 67 | unsigned long long mip_reg; | ||
| 68 | }; | ||
| 69 | |||
| 70 | struct part_info { | ||
| 71 | unsigned char type; | ||
| 72 | unsigned char length; | ||
| 73 | unsigned char part_id; | ||
| 74 | unsigned char apic_mode; | ||
| 75 | unsigned long snum; | ||
| 76 | char ptype[16]; | ||
| 77 | char sname[64]; | ||
| 78 | char pname[64]; | ||
| 79 | }; | ||
| 80 | |||
| 81 | struct psai { | ||
| 82 | unsigned long long entry_type; | ||
| 83 | unsigned long long addr; | ||
| 84 | unsigned long long bep_addr; | ||
| 85 | }; | ||
| 86 | |||
| 87 | struct es7000_mem_info { | ||
| 88 | unsigned char type; | ||
| 89 | unsigned char length; | ||
| 90 | unsigned char resv[6]; | ||
| 91 | unsigned long long start; | ||
| 92 | unsigned long long size; | ||
| 93 | }; | ||
| 94 | |||
| 95 | struct es7000_oem_table { | ||
| 96 | unsigned long long hdr; | ||
| 97 | struct mip_reg_info mip; | ||
| 98 | struct part_info pif; | ||
| 99 | struct es7000_mem_info shm; | ||
| 100 | struct psai psai; | ||
| 101 | }; | ||
| 102 | |||
| 103 | #ifdef CONFIG_ACPI | ||
| 104 | |||
| 105 | struct oem_table { | ||
| 106 | struct acpi_table_header Header; | ||
| 107 | u32 OEMTableAddr; | ||
| 108 | u32 OEMTableSize; | ||
| 109 | }; | ||
| 110 | |||
| 111 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | ||
| 112 | #endif | ||
| 113 | |||
| 114 | struct mip_reg { | ||
| 115 | unsigned long long off_0; | ||
| 116 | unsigned long long off_8; | ||
| 117 | unsigned long long off_10; | ||
| 118 | unsigned long long off_18; | ||
| 119 | unsigned long long off_20; | ||
| 120 | unsigned long long off_28; | ||
| 121 | unsigned long long off_30; | ||
| 122 | unsigned long long off_38; | ||
| 123 | }; | ||
| 124 | |||
| 125 | #define MIP_SW_APIC 0x1020b | ||
| 126 | #define MIP_FUNC(VALUE) (VALUE & 0xff) | ||
| 127 | |||
| 128 | /* | ||
| 46 | * ES7000 Globals | 129 | * ES7000 Globals |
| 47 | */ | 130 | */ |
| 48 | 131 | ||
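The MIP_* macros consolidated above pack a port number and a 32-bit datum into one 64-bit register value. A small demonstration of that bit layout (the composed value is illustrative):

```c
#include <stdio.h>

#define MIP_VALID	0x0100000000000000ULL
#define MIP_PORT(v)	(((v) >> 32) & 0xffff)
#define MIP_RD_LO(v)	((v) & 0xffffffff)

int main(void)
{
	/* Valid bit set, port 0x1234, low datum 0xdeadbeef. */
	unsigned long long v = MIP_VALID | (0x1234ULL << 32) | 0xdeadbeefULL;

	printf("valid=%d port=%04llx lo=%08llx\n",
	       !!(v & MIP_VALID), MIP_PORT(v), MIP_RD_LO(v));
	return 0;
}
```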
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index fed9f68efd66..e4bf2cc0d743 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c | |||
| @@ -120,14 +120,9 @@ static unsigned long set_apic_id(unsigned int id) | |||
| 120 | return x; | 120 | return x; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static unsigned int x2apic_read_id(void) | ||
| 124 | { | ||
| 125 | return apic_read(APIC_ID); | ||
| 126 | } | ||
| 127 | |||
| 128 | static unsigned int phys_pkg_id(int index_msb) | 123 | static unsigned int phys_pkg_id(int index_msb) |
| 129 | { | 124 | { |
| 130 | return x2apic_read_id() >> index_msb; | 125 | return current_cpu_data.initial_apicid >> index_msb; |
| 131 | } | 126 | } |
| 132 | 127 | ||
| 133 | static void x2apic_send_IPI_self(int vector) | 128 | static void x2apic_send_IPI_self(int vector) |
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 958d537b4cc9..8f1343df2627 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c | |||
| @@ -118,14 +118,9 @@ static unsigned long set_apic_id(unsigned int id) | |||
| 118 | return x; | 118 | return x; |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static unsigned int x2apic_read_id(void) | ||
| 122 | { | ||
| 123 | return apic_read(APIC_ID); | ||
| 124 | } | ||
| 125 | |||
| 126 | static unsigned int phys_pkg_id(int index_msb) | 121 | static unsigned int phys_pkg_id(int index_msb) |
| 127 | { | 122 | { |
| 128 | return x2apic_read_id() >> index_msb; | 123 | return current_cpu_data.initial_apicid >> index_msb; |
| 129 | } | 124 | } |
| 130 | 125 | ||
| 131 | void x2apic_send_IPI_self(int vector) | 126 | void x2apic_send_IPI_self(int vector) |
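Both x2apic variants now derive the package ID from the cached initial APIC ID instead of re-reading the APIC register. A minimal sketch of that shift-based derivation, with an assumed index_msb of 2 (four logical CPUs per package):

```c
#include <stdio.h>

/* index_msb is the bit where the package field of the APIC ID starts. */
static unsigned int phys_pkg_id(unsigned int initial_apicid, int index_msb)
{
	return initial_apicid >> index_msb;
}

int main(void)
{
	for (unsigned int apicid = 0; apicid < 8; apicid++)
		printf("apicid %u -> package %u\n",
		       apicid, phys_pkg_id(apicid, 2));
	return 0;
}
```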
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index eb9ddd8efb82..45723f1fe198 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
| @@ -21,9 +21,12 @@ | |||
| 21 | # include <asm/sigcontext32.h> | 21 | # include <asm/sigcontext32.h> |
| 22 | # include <asm/user32.h> | 22 | # include <asm/user32.h> |
| 23 | #else | 23 | #else |
| 24 | # define save_i387_ia32 save_i387 | 24 | # define save_i387_xstate_ia32 save_i387_xstate |
| 25 | # define restore_i387_ia32 restore_i387 | 25 | # define restore_i387_xstate_ia32 restore_i387_xstate |
| 26 | # define _fpstate_ia32 _fpstate | 26 | # define _fpstate_ia32 _fpstate |
| 27 | # define _xstate_ia32 _xstate | ||
| 28 | # define sig_xstate_ia32_size sig_xstate_size | ||
| 29 | # define fx_sw_reserved_ia32 fx_sw_reserved | ||
| 27 | # define user_i387_ia32_struct user_i387_struct | 30 | # define user_i387_ia32_struct user_i387_struct |
| 28 | # define user32_fxsr_struct user_fxsr_struct | 31 | # define user32_fxsr_struct user_fxsr_struct |
| 29 | #endif | 32 | #endif |
| @@ -36,6 +39,7 @@ | |||
| 36 | 39 | ||
| 37 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 40 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
| 38 | unsigned int xstate_size; | 41 | unsigned int xstate_size; |
| 42 | unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32); | ||
| 39 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; | 43 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; |
| 40 | 44 | ||
| 41 | void __cpuinit mxcsr_feature_mask_init(void) | 45 | void __cpuinit mxcsr_feature_mask_init(void) |
| @@ -61,6 +65,11 @@ void __init init_thread_xstate(void) | |||
| 61 | return; | 65 | return; |
| 62 | } | 66 | } |
| 63 | 67 | ||
| 68 | if (cpu_has_xsave) { | ||
| 69 | xsave_cntxt_init(); | ||
| 70 | return; | ||
| 71 | } | ||
| 72 | |||
| 64 | if (cpu_has_fxsr) | 73 | if (cpu_has_fxsr) |
| 65 | xstate_size = sizeof(struct i387_fxsave_struct); | 74 | xstate_size = sizeof(struct i387_fxsave_struct); |
| 66 | #ifdef CONFIG_X86_32 | 75 | #ifdef CONFIG_X86_32 |
| @@ -83,9 +92,19 @@ void __cpuinit fpu_init(void) | |||
| 83 | 92 | ||
| 84 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ | 93 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ |
| 85 | 94 | ||
| 95 | /* | ||
| 96 | * On the boot processor, set up the FP and extended state context info. | ||
| 97 | */ | ||
| 98 | if (!smp_processor_id()) | ||
| 99 | init_thread_xstate(); | ||
| 100 | xsave_init(); | ||
| 101 | |||
| 86 | mxcsr_feature_mask_init(); | 102 | mxcsr_feature_mask_init(); |
| 87 | /* clean state in init */ | 103 | /* clean state in init */ |
| 88 | current_thread_info()->status = 0; | 104 | if (cpu_has_xsave) |
| 105 | current_thread_info()->status = TS_XSAVE; | ||
| 106 | else | ||
| 107 | current_thread_info()->status = 0; | ||
| 89 | clear_used_math(); | 108 | clear_used_math(); |
| 90 | } | 109 | } |
| 91 | #endif /* CONFIG_X86_64 */ | 110 | #endif /* CONFIG_X86_64 */ |
| @@ -195,6 +214,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
| 195 | */ | 214 | */ |
| 196 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 215 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; |
| 197 | 216 | ||
| 217 | /* | ||
| 218 | * update the header bits in the xsave header, indicating the | ||
| 219 | * presence of FP and SSE state. | ||
| 220 | */ | ||
| 221 | if (cpu_has_xsave) | ||
| 222 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; | ||
| 223 | |||
| 198 | return ret; | 224 | return ret; |
| 199 | } | 225 | } |
| 200 | 226 | ||
| @@ -395,6 +421,12 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
| 395 | if (!ret) | 421 | if (!ret) |
| 396 | convert_to_fxsr(target, &env); | 422 | convert_to_fxsr(target, &env); |
| 397 | 423 | ||
| 424 | /* | ||
| 425 | * update the header bit in the xsave header, indicating the | ||
| 426 | * presence of FP. | ||
| 427 | */ | ||
| 428 | if (cpu_has_xsave) | ||
| 429 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; | ||
| 398 | return ret; | 430 | return ret; |
| 399 | } | 431 | } |
| 400 | 432 | ||
| @@ -407,7 +439,6 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
| 407 | struct task_struct *tsk = current; | 439 | struct task_struct *tsk = current; |
| 408 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; | 440 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; |
| 409 | 441 | ||
| 410 | unlazy_fpu(tsk); | ||
| 411 | fp->status = fp->swd; | 442 | fp->status = fp->swd; |
| 412 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) | 443 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) |
| 413 | return -1; | 444 | return -1; |
| @@ -421,8 +452,6 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
| 421 | struct user_i387_ia32_struct env; | 452 | struct user_i387_ia32_struct env; |
| 422 | int err = 0; | 453 | int err = 0; |
| 423 | 454 | ||
| 424 | unlazy_fpu(tsk); | ||
| 425 | |||
| 426 | convert_from_fxsr(&env, tsk); | 455 | convert_from_fxsr(&env, tsk); |
| 427 | if (__copy_to_user(buf, &env, sizeof(env))) | 456 | if (__copy_to_user(buf, &env, sizeof(env))) |
| 428 | return -1; | 457 | return -1; |
| @@ -432,16 +461,40 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
| 432 | if (err) | 461 | if (err) |
| 433 | return -1; | 462 | return -1; |
| 434 | 463 | ||
| 435 | if (__copy_to_user(&buf->_fxsr_env[0], fx, | 464 | if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size)) |
| 436 | sizeof(struct i387_fxsave_struct))) | ||
| 437 | return -1; | 465 | return -1; |
| 438 | return 1; | 466 | return 1; |
| 439 | } | 467 | } |
| 440 | 468 | ||
| 441 | int save_i387_ia32(struct _fpstate_ia32 __user *buf) | 469 | static int save_i387_xsave(void __user *buf) |
| 470 | { | ||
| 471 | struct _fpstate_ia32 __user *fx = buf; | ||
| 472 | int err = 0; | ||
| 473 | |||
| 474 | if (save_i387_fxsave(fx) < 0) | ||
| 475 | return -1; | ||
| 476 | |||
| 477 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32, | ||
| 478 | sizeof(struct _fpx_sw_bytes)); | ||
| 479 | err |= __put_user(FP_XSTATE_MAGIC2, | ||
| 480 | (__u32 __user *) (buf + sig_xstate_ia32_size | ||
| 481 | - FP_XSTATE_MAGIC2_SIZE)); | ||
| 482 | if (err) | ||
| 483 | return -1; | ||
| 484 | |||
| 485 | return 1; | ||
| 486 | } | ||
| 487 | |||
| 488 | int save_i387_xstate_ia32(void __user *buf) | ||
| 442 | { | 489 | { |
| 490 | struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; | ||
| 491 | struct task_struct *tsk = current; | ||
| 492 | |||
| 443 | if (!used_math()) | 493 | if (!used_math()) |
| 444 | return 0; | 494 | return 0; |
| 495 | |||
| 496 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size)) | ||
| 497 | return -EACCES; | ||
| 445 | /* | 498 | /* |
| 446 | * This will cause a "finit" to be triggered by the next | 499 | * This will cause a "finit" to be triggered by the next |
| 447 | * attempted FPU operation by the 'current' process. | 500 | * attempted FPU operation by the 'current' process. |
| @@ -451,13 +504,17 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
| 451 | if (!HAVE_HWFP) { | 504 | if (!HAVE_HWFP) { |
| 452 | return fpregs_soft_get(current, NULL, | 505 | return fpregs_soft_get(current, NULL, |
| 453 | 0, sizeof(struct user_i387_ia32_struct), | 506 | 0, sizeof(struct user_i387_ia32_struct), |
| 454 | NULL, buf) ? -1 : 1; | 507 | NULL, fp) ? -1 : 1; |
| 455 | } | 508 | } |
| 456 | 509 | ||
| 510 | unlazy_fpu(tsk); | ||
| 511 | |||
| 512 | if (cpu_has_xsave) | ||
| 513 | return save_i387_xsave(fp); | ||
| 457 | if (cpu_has_fxsr) | 514 | if (cpu_has_fxsr) |
| 458 | return save_i387_fxsave(buf); | 515 | return save_i387_fxsave(fp); |
| 459 | else | 516 | else |
| 460 | return save_i387_fsave(buf); | 517 | return save_i387_fsave(fp); |
| 461 | } | 518 | } |
| 462 | 519 | ||
| 463 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | 520 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) |
| @@ -468,14 +525,15 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
| 468 | sizeof(struct i387_fsave_struct)); | 525 | sizeof(struct i387_fsave_struct)); |
| 469 | } | 526 | } |
| 470 | 527 | ||
| 471 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | 528 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, |
| 529 | unsigned int size) | ||
| 472 | { | 530 | { |
| 473 | struct task_struct *tsk = current; | 531 | struct task_struct *tsk = current; |
| 474 | struct user_i387_ia32_struct env; | 532 | struct user_i387_ia32_struct env; |
| 475 | int err; | 533 | int err; |
| 476 | 534 | ||
| 477 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], | 535 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], |
| 478 | sizeof(struct i387_fxsave_struct)); | 536 | size); |
| 479 | /* mxcsr reserved bits must be masked to zero for security reasons */ | 537 | /* mxcsr reserved bits must be masked to zero for security reasons */ |
| 480 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 538 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; |
| 481 | if (err || __copy_from_user(&env, buf, sizeof(env))) | 539 | if (err || __copy_from_user(&env, buf, sizeof(env))) |
| @@ -485,14 +543,69 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
| 485 | return 0; | 543 | return 0; |
| 486 | } | 544 | } |
| 487 | 545 | ||
| 488 | int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | 546 | static int restore_i387_xsave(void __user *buf) |
| 547 | { | ||
| 548 | struct _fpx_sw_bytes fx_sw_user; | ||
| 549 | struct _fpstate_ia32 __user *fx_user = | ||
| 550 | ((struct _fpstate_ia32 __user *) buf); | ||
| 551 | struct i387_fxsave_struct __user *fx = | ||
| 552 | (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; | ||
| 553 | struct xsave_hdr_struct *xsave_hdr = | ||
| 554 | ¤t->thread.xstate->xsave.xsave_hdr; | ||
| 555 | u64 mask; | ||
| 556 | int err; | ||
| 557 | |||
| 558 | if (check_for_xstate(fx, buf, &fx_sw_user)) | ||
| 559 | goto fx_only; | ||
| 560 | |||
| 561 | mask = fx_sw_user.xstate_bv; | ||
| 562 | |||
| 563 | err = restore_i387_fxsave(buf, fx_sw_user.xstate_size); | ||
| 564 | |||
| 565 | xsave_hdr->xstate_bv &= pcntxt_mask; | ||
| 566 | /* | ||
| 567 | * These bits must be zero. | ||
| 568 | */ | ||
| 569 | xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; | ||
| 570 | |||
| 571 | /* | ||
| 572 | * Init the state that is not present in the memory layout | ||
| 573 | * and enabled by the OS. | ||
| 574 | */ | ||
| 575 | mask = ~(pcntxt_mask & ~mask); | ||
| 576 | xsave_hdr->xstate_bv &= mask; | ||
| 577 | |||
| 578 | return err; | ||
| 579 | fx_only: | ||
| 580 | /* | ||
| 581 | * Couldn't find the extended state information in the memory | ||
| 582 | * layout. Restore the FP/SSE and init the other extended state | ||
| 583 | * enabled by the OS. | ||
| 584 | */ | ||
| 585 | xsave_hdr->xstate_bv = XSTATE_FPSSE; | ||
| 586 | return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct)); | ||
| 587 | } | ||
| 588 | |||
| 589 | int restore_i387_xstate_ia32(void __user *buf) | ||
| 489 | { | 590 | { |
| 490 | int err; | 591 | int err; |
| 491 | struct task_struct *tsk = current; | 592 | struct task_struct *tsk = current; |
| 593 | struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; | ||
| 492 | 594 | ||
| 493 | if (HAVE_HWFP) | 595 | if (HAVE_HWFP) |
| 494 | clear_fpu(tsk); | 596 | clear_fpu(tsk); |
| 495 | 597 | ||
| 598 | if (!buf) { | ||
| 599 | if (used_math()) { | ||
| 600 | clear_fpu(tsk); | ||
| 601 | clear_used_math(); | ||
| 602 | } | ||
| 603 | |||
| 604 | return 0; | ||
| 605 | } else | ||
| 606 | if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size)) | ||
| 607 | return -EACCES; | ||
| 608 | |||
| 496 | if (!used_math()) { | 609 | if (!used_math()) { |
| 497 | err = init_fpu(tsk); | 610 | err = init_fpu(tsk); |
| 498 | if (err) | 611 | if (err) |
| @@ -500,14 +613,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
| 500 | } | 613 | } |
| 501 | 614 | ||
| 502 | if (HAVE_HWFP) { | 615 | if (HAVE_HWFP) { |
| 503 | if (cpu_has_fxsr) | 616 | if (cpu_has_xsave) |
| 504 | err = restore_i387_fxsave(buf); | 617 | err = restore_i387_xsave(buf); |
| 618 | else if (cpu_has_fxsr) | ||
| 619 | err = restore_i387_fxsave(fp, sizeof(struct | ||
| 620 | i387_fxsave_struct)); | ||
| 505 | else | 621 | else |
| 506 | err = restore_i387_fsave(buf); | 622 | err = restore_i387_fsave(fp); |
| 507 | } else { | 623 | } else { |
| 508 | err = fpregs_soft_set(current, NULL, | 624 | err = fpregs_soft_set(current, NULL, |
| 509 | 0, sizeof(struct user_i387_ia32_struct), | 625 | 0, sizeof(struct user_i387_ia32_struct), |
| 510 | NULL, buf) != 0; | 626 | NULL, fp) != 0; |
| 511 | } | 627 | } |
| 512 | set_used_math(); | 628 | set_used_math(); |
| 513 | 629 | ||
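save_i387_xsave() brackets the extended state with software-reserved bytes and a trailing FP_XSTATE_MAGIC2 word, which restore_i387_xsave() later validates via check_for_xstate(). A hedged sketch of that framing check; the struct layout is simplified and the magic constants are assumptions modeled on the kernel's values:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FP_XSTATE_MAGIC1	0x46505853u	/* assumed */
#define FP_XSTATE_MAGIC2	0x46505845u	/* assumed */

/* Simplified stand-in for the kernel's _fpx_sw_bytes. */
struct fpx_sw_bytes {
	uint32_t magic1;	/* FP_XSTATE_MAGIC1 if xstate is framed */
	uint32_t extended_size;	/* whole frame size, incl. MAGIC2 */
	uint64_t xstate_bv;	/* which state components were saved */
	uint32_t xstate_size;	/* size of the xsave area itself */
};

/* Returns 1 if buf (frame_size bytes) carries properly framed xstate. */
static int has_framed_xstate(const uint8_t *buf, size_t frame_size,
			     const struct fpx_sw_bytes *sw)
{
	uint32_t magic2;

	if (sw->magic1 != FP_XSTATE_MAGIC1 ||
	    sw->extended_size > frame_size)
		return 0;
	memcpy(&magic2, buf + sw->extended_size - sizeof(magic2),
	       sizeof(magic2));
	return magic2 == FP_XSTATE_MAGIC2;
}

int main(void)
{
	uint8_t frame[64] = {0};
	struct fpx_sw_bytes sw = {
		.magic1 = FP_XSTATE_MAGIC1,
		.extended_size = sizeof(frame),
		.xstate_bv = 3,		/* FP | SSE */
		.xstate_size = 48,
	};
	uint32_t m2 = FP_XSTATE_MAGIC2;

	memcpy(frame + sizeof(frame) - sizeof(m2), &m2, sizeof(m2));
	printf("framed xstate: %d\n",
	       has_framed_xstate(frame, sizeof(frame), &sw));
	return 0;
}
```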
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ec7a2ba9bce8..c622772744d8 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -15,7 +15,6 @@ unsigned long idle_nomwait; | |||
| 15 | EXPORT_SYMBOL(idle_nomwait); | 15 | EXPORT_SYMBOL(idle_nomwait); |
| 16 | 16 | ||
| 17 | struct kmem_cache *task_xstate_cachep; | 17 | struct kmem_cache *task_xstate_cachep; |
| 18 | static int force_mwait __cpuinitdata; | ||
| 19 | 18 | ||
| 20 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 19 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
| 21 | { | 20 | { |
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h index 8b4956e800ac..cc673aa55ce4 100644 --- a/arch/x86/kernel/sigframe.h +++ b/arch/x86/kernel/sigframe.h | |||
| @@ -3,9 +3,18 @@ struct sigframe { | |||
| 3 | char __user *pretcode; | 3 | char __user *pretcode; |
| 4 | int sig; | 4 | int sig; |
| 5 | struct sigcontext sc; | 5 | struct sigcontext sc; |
| 6 | struct _fpstate fpstate; | 6 | /* |
| 7 | * fpstate is unused. The live FP state is moved/allocated after | ||
| 8 | * retcode[] below, which lets the FP state and future state | ||
| 9 | * extensions (xsave) stay together. | ||
| 10 | * At the same time, retaining the unused fpstate keeps the offset | ||
| 11 | * of extramask[] in the sigframe unchanged, so legacy applications | ||
| 12 | * that access/modify it keep working. | ||
| 13 | */ | ||
| 14 | struct _fpstate fpstate_unused; | ||
| 7 | unsigned long extramask[_NSIG_WORDS-1]; | 15 | unsigned long extramask[_NSIG_WORDS-1]; |
| 8 | char retcode[8]; | 16 | char retcode[8]; |
| 17 | /* fp state follows here */ | ||
| 9 | }; | 18 | }; |
| 10 | 19 | ||
| 11 | struct rt_sigframe { | 20 | struct rt_sigframe { |
| @@ -15,14 +24,15 @@ struct rt_sigframe { | |||
| 15 | void __user *puc; | 24 | void __user *puc; |
| 16 | struct siginfo info; | 25 | struct siginfo info; |
| 17 | struct ucontext uc; | 26 | struct ucontext uc; |
| 18 | struct _fpstate fpstate; | ||
| 19 | char retcode[8]; | 27 | char retcode[8]; |
| 28 | /* fp state follows here */ | ||
| 20 | }; | 29 | }; |
| 21 | #else | 30 | #else |
| 22 | struct rt_sigframe { | 31 | struct rt_sigframe { |
| 23 | char __user *pretcode; | 32 | char __user *pretcode; |
| 24 | struct ucontext uc; | 33 | struct ucontext uc; |
| 25 | struct siginfo info; | 34 | struct siginfo info; |
| 35 | /* fp state follows here */ | ||
| 26 | }; | 36 | }; |
| 27 | 37 | ||
| 28 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 38 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
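The comment above is about ABI stability: dropping fpstate outright would shift extramask[], which legacy userspace may touch directly. A toy sketch showing how the retained placeholder pins that offset (stub sizes, not the real sigcontext/_fpstate sizes):

```c
#include <stdio.h>
#include <stddef.h>

struct sigframe_layout {
	char *pretcode;
	int sig;
	char sc[88];		 /* stand-in for struct sigcontext */
	char fpstate_unused[112];/* stand-in for struct _fpstate */
	unsigned long extramask[1];
	char retcode[8];
	/* real fp state now follows the frame on the stack */
};

int main(void)
{
	/* Removing fpstate_unused would change this offset -- exactly
	 * what the kernel comment says must not happen. */
	printf("extramask offset: %zu\n",
	       offsetof(struct sigframe_layout, extramask));
	return 0;
}
```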
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 2a2435d3037d..b21070ea33a4 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
| @@ -161,28 +161,14 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | { | 163 | { |
| 164 | struct _fpstate __user *buf; | 164 | void __user *buf; |
| 165 | 165 | ||
| 166 | err |= __get_user(buf, &sc->fpstate); | 166 | err |= __get_user(buf, &sc->fpstate); |
| 167 | if (buf) { | 167 | err |= restore_i387_xstate(buf); |
| 168 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
| 169 | goto badframe; | ||
| 170 | err |= restore_i387(buf); | ||
| 171 | } else { | ||
| 172 | struct task_struct *me = current; | ||
| 173 | |||
| 174 | if (used_math()) { | ||
| 175 | clear_fpu(me); | ||
| 176 | clear_used_math(); | ||
| 177 | } | ||
| 178 | } | ||
| 179 | } | 168 | } |
| 180 | 169 | ||
| 181 | err |= __get_user(*pax, &sc->ax); | 170 | err |= __get_user(*pax, &sc->ax); |
| 182 | return err; | 171 | return err; |
| 183 | |||
| 184 | badframe: | ||
| 185 | return 1; | ||
| 186 | } | 172 | } |
| 187 | 173 | ||
| 188 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) | 174 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) |
| @@ -264,7 +250,7 @@ badframe: | |||
| 264 | * Set up a signal frame. | 250 | * Set up a signal frame. |
| 265 | */ | 251 | */ |
| 266 | static int | 252 | static int |
| 267 | setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | 253 | setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, |
| 268 | struct pt_regs *regs, unsigned long mask) | 254 | struct pt_regs *regs, unsigned long mask) |
| 269 | { | 255 | { |
| 270 | int tmp, err = 0; | 256 | int tmp, err = 0; |
| @@ -291,7 +277,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
| 291 | err |= __put_user(regs->sp, &sc->sp_at_signal); | 277 | err |= __put_user(regs->sp, &sc->sp_at_signal); |
| 292 | err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); | 278 | err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); |
| 293 | 279 | ||
| 294 | tmp = save_i387(fpstate); | 280 | tmp = save_i387_xstate(fpstate); |
| 295 | if (tmp < 0) | 281 | if (tmp < 0) |
| 296 | err = 1; | 282 | err = 1; |
| 297 | else | 283 | else |
| @@ -308,7 +294,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
| 308 | * Determine which stack to use.. | 294 | * Determine which stack to use.. |
| 309 | */ | 295 | */ |
| 310 | static inline void __user * | 296 | static inline void __user * |
| 311 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 297 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, |
| 298 | void **fpstate) | ||
| 312 | { | 299 | { |
| 313 | unsigned long sp; | 300 | unsigned long sp; |
| 314 | 301 | ||
| @@ -334,6 +321,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
| 334 | sp = (unsigned long) ka->sa.sa_restorer; | 321 | sp = (unsigned long) ka->sa.sa_restorer; |
| 335 | } | 322 | } |
| 336 | 323 | ||
| 324 | if (used_math()) { | ||
| 325 | sp = sp - sig_xstate_size; | ||
| 326 | *fpstate = (struct _fpstate *) sp; | ||
| 327 | } | ||
| 328 | |||
| 337 | sp -= frame_size; | 329 | sp -= frame_size; |
| 338 | /* | 330 | /* |
| 339 | * Align the stack pointer according to the i386 ABI, | 331 | * Align the stack pointer according to the i386 ABI, |
| @@ -352,8 +344,9 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
| 352 | void __user *restorer; | 344 | void __user *restorer; |
| 353 | int err = 0; | 345 | int err = 0; |
| 354 | int usig; | 346 | int usig; |
| 347 | void __user *fpstate = NULL; | ||
| 355 | 348 | ||
| 356 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 349 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
| 357 | 350 | ||
| 358 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 351 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
| 359 | goto give_sigsegv; | 352 | goto give_sigsegv; |
| @@ -368,7 +361,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
| 368 | if (err) | 361 | if (err) |
| 369 | goto give_sigsegv; | 362 | goto give_sigsegv; |
| 370 | 363 | ||
| 371 | err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); | 364 | err = setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]); |
| 372 | if (err) | 365 | if (err) |
| 373 | goto give_sigsegv; | 366 | goto give_sigsegv; |
| 374 | 367 | ||
| @@ -429,8 +422,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
| 429 | void __user *restorer; | 422 | void __user *restorer; |
| 430 | int err = 0; | 423 | int err = 0; |
| 431 | int usig; | 424 | int usig; |
| 425 | void __user *fpstate = NULL; | ||
| 432 | 426 | ||
| 433 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 427 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
| 434 | 428 | ||
| 435 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 429 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
| 436 | goto give_sigsegv; | 430 | goto give_sigsegv; |
| @@ -449,13 +443,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
| 449 | goto give_sigsegv; | 443 | goto give_sigsegv; |
| 450 | 444 | ||
| 451 | /* Create the ucontext. */ | 445 | /* Create the ucontext. */ |
| 452 | err |= __put_user(0, &frame->uc.uc_flags); | 446 | if (cpu_has_xsave) |
| 447 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
| 448 | else | ||
| 449 | err |= __put_user(0, &frame->uc.uc_flags); | ||
| 453 | err |= __put_user(0, &frame->uc.uc_link); | 450 | err |= __put_user(0, &frame->uc.uc_link); |
| 454 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 451 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
| 455 | err |= __put_user(sas_ss_flags(regs->sp), | 452 | err |= __put_user(sas_ss_flags(regs->sp), |
| 456 | &frame->uc.uc_stack.ss_flags); | 453 | &frame->uc.uc_stack.ss_flags); |
| 457 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 454 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
| 458 | err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, | 455 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
| 459 | regs, set->sig[0]); | 456 | regs, set->sig[0]); |
| 460 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 457 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
| 461 | if (err) | 458 | if (err) |
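The net flow in setup_frame()/setup_rt_frame() after this change: get_sigframe() carves the variably sized xstate area off the user stack first and returns it through *fpstate, then setup_sigcontext() records that pointer in sc->fpstate while save_i387_xstate() fills it. A condensed sketch using the names from the patch (error paths elided):

    void __user *fpstate = NULL;
    struct sigframe __user *frame;

    /* get_sigframe(): if (used_math()) { sp -= sig_xstate_size;
     *                                     *fpstate = (void *)sp; }
     *                 sp -= frame_size; ... i386 ABI alignment ... */
    frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

    /* setup_sigcontext() stores fpstate in sc->fpstate and calls
     * save_i387_xstate(fpstate) to write the fxsave/xsave image. */
    err = setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]);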
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index 694aa888bb19..823a55bf8c39 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c | |||
| @@ -53,69 +53,6 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | |||
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| 56 | * Signal frame handlers. | ||
| 57 | */ | ||
| 58 | |||
| 59 | static inline int save_i387(struct _fpstate __user *buf) | ||
| 60 | { | ||
| 61 | struct task_struct *tsk = current; | ||
| 62 | int err = 0; | ||
| 63 | |||
| 64 | BUILD_BUG_ON(sizeof(struct user_i387_struct) != | ||
| 65 | sizeof(tsk->thread.xstate->fxsave)); | ||
| 66 | |||
| 67 | if ((unsigned long)buf % 16) | ||
| 68 | printk("save_i387: bad fpstate %p\n", buf); | ||
| 69 | |||
| 70 | if (!used_math()) | ||
| 71 | return 0; | ||
| 72 | clear_used_math(); /* trigger finit */ | ||
| 73 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
| 74 | err = save_i387_checking((struct i387_fxsave_struct __user *) | ||
| 75 | buf); | ||
| 76 | if (err) | ||
| 77 | return err; | ||
| 78 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
| 79 | stts(); | ||
| 80 | } else { | ||
| 81 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | ||
| 82 | sizeof(struct i387_fxsave_struct))) | ||
| 83 | return -1; | ||
| 84 | } | ||
| 85 | return 1; | ||
| 86 | } | ||
| 87 | |||
| 88 | /* | ||
| 89 | * This restores directly out of user space. Exceptions are handled. | ||
| 90 | */ | ||
| 91 | static inline int restore_i387(struct _fpstate __user *buf) | ||
| 92 | { | ||
| 93 | struct task_struct *tsk = current; | ||
| 94 | int err; | ||
| 95 | |||
| 96 | if (!used_math()) { | ||
| 97 | err = init_fpu(tsk); | ||
| 98 | if (err) | ||
| 99 | return err; | ||
| 100 | } | ||
| 101 | |||
| 102 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
| 103 | clts(); | ||
| 104 | task_thread_info(current)->status |= TS_USEDFPU; | ||
| 105 | } | ||
| 106 | err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf); | ||
| 107 | if (unlikely(err)) { | ||
| 108 | /* | ||
| 109 | * Encountered an error while doing the restore from the | ||
| 110 | * user buffer, clear the fpu state. | ||
| 111 | */ | ||
| 112 | clear_fpu(tsk); | ||
| 113 | clear_used_math(); | ||
| 114 | } | ||
| 115 | return err; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Do a signal return; undo the signal stack. | 56 | * Do a signal return; undo the signal stack. |
| 120 | */ | 57 | */ |
| 121 | static int | 58 | static int |
| @@ -159,25 +96,11 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
| 159 | { | 96 | { |
| 160 | struct _fpstate __user *buf; | 97 | struct _fpstate __user *buf; |
| 161 | err |= __get_user(buf, &sc->fpstate); | 98 | err |= __get_user(buf, &sc->fpstate); |
| 162 | 99 | err |= restore_i387_xstate(buf); | |
| 163 | if (buf) { | ||
| 164 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
| 165 | goto badframe; | ||
| 166 | err |= restore_i387(buf); | ||
| 167 | } else { | ||
| 168 | struct task_struct *me = current; | ||
| 169 | if (used_math()) { | ||
| 170 | clear_fpu(me); | ||
| 171 | clear_used_math(); | ||
| 172 | } | ||
| 173 | } | ||
| 174 | } | 100 | } |
| 175 | 101 | ||
| 176 | err |= __get_user(*pax, &sc->ax); | 102 | err |= __get_user(*pax, &sc->ax); |
| 177 | return err; | 103 | return err; |
| 178 | |||
| 179 | badframe: | ||
| 180 | return 1; | ||
| 181 | } | 104 | } |
| 182 | 105 | ||
| 183 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | 106 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) |
| @@ -269,26 +192,23 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) | |||
| 269 | sp = current->sas_ss_sp + current->sas_ss_size; | 192 | sp = current->sas_ss_sp + current->sas_ss_size; |
| 270 | } | 193 | } |
| 271 | 194 | ||
| 272 | return (void __user *)round_down(sp - size, 16); | 195 | return (void __user *)round_down(sp - size, 64); |
| 273 | } | 196 | } |
| 274 | 197 | ||
| 275 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 198 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
| 276 | sigset_t *set, struct pt_regs *regs) | 199 | sigset_t *set, struct pt_regs *regs) |
| 277 | { | 200 | { |
| 278 | struct rt_sigframe __user *frame; | 201 | struct rt_sigframe __user *frame; |
| 279 | struct _fpstate __user *fp = NULL; | 202 | void __user *fp = NULL; |
| 280 | int err = 0; | 203 | int err = 0; |
| 281 | struct task_struct *me = current; | 204 | struct task_struct *me = current; |
| 282 | 205 | ||
| 283 | if (used_math()) { | 206 | if (used_math()) { |
| 284 | fp = get_stack(ka, regs, sizeof(struct _fpstate)); | 207 | fp = get_stack(ka, regs, sig_xstate_size); |
| 285 | frame = (void __user *)round_down( | 208 | frame = (void __user *)round_down( |
| 286 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | 209 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; |
| 287 | 210 | ||
| 288 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) | 211 | if (save_i387_xstate(fp) < 0) |
| 289 | goto give_sigsegv; | ||
| 290 | |||
| 291 | if (save_i387(fp) < 0) | ||
| 292 | err |= -1; | 212 | err |= -1; |
| 293 | } else | 213 | } else |
| 294 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; | 214 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; |
| @@ -303,7 +223,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
| 303 | } | 223 | } |
| 304 | 224 | ||
| 305 | /* Create the ucontext. */ | 225 | /* Create the ucontext. */ |
| 306 | err |= __put_user(0, &frame->uc.uc_flags); | 226 | if (cpu_has_xsave) |
| 227 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
| 228 | else | ||
| 229 | err |= __put_user(0, &frame->uc.uc_flags); | ||
| 307 | err |= __put_user(0, &frame->uc.uc_link); | 230 | err |= __put_user(0, &frame->uc.uc_link); |
| 308 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 231 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
| 309 | err |= __put_user(sas_ss_flags(regs->sp), | 232 | err |= __put_user(sas_ss_flags(regs->sp), |
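Note the alignment bump in get_stack(): xsave/xrstor fault on a save area that is not 64-byte aligned, while fxsave only needed 16 bytes. Rounding the xstate area down to 64 (together with save_i387_xstate()'s warning on a misaligned buffer) guards that architectural requirement. The constraint, as a trivial check:

    /* xsave demands 64-byte alignment of its save area */
    static inline int xstate_buf_aligned(const void __user *buf)
    {
            return ((unsigned long)buf & 63) == 0;
    }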
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 03df8e45e5a1..da5a5964fccb 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c | |||
| @@ -1228,7 +1228,6 @@ void __init trap_init(void) | |||
| 1228 | 1228 | ||
| 1229 | set_bit(SYSCALL_VECTOR, used_vectors); | 1229 | set_bit(SYSCALL_VECTOR, used_vectors); |
| 1230 | 1230 | ||
| 1231 | init_thread_xstate(); | ||
| 1232 | /* | 1231 | /* |
| 1233 | * Should be a barrier for any external CPU state: | 1232 | * Should be a barrier for any external CPU state: |
| 1234 | */ | 1233 | */ |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 7a31f104bef9..2887a789e38f 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
| @@ -1138,7 +1138,7 @@ asmlinkage void math_state_restore(void) | |||
| 1138 | /* | 1138 | /* |
| 1139 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | 1139 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. |
| 1140 | */ | 1140 | */ |
| 1141 | if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) { | 1141 | if (unlikely(restore_fpu_checking(me))) { |
| 1142 | stts(); | 1142 | stts(); |
| 1143 | force_sig(SIGSEGV, me); | 1143 | force_sig(SIGSEGV, me); |
| 1144 | return; | 1144 | return; |
| @@ -1179,10 +1179,6 @@ void __init trap_init(void) | |||
| 1179 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 1179 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
| 1180 | #endif | 1180 | #endif |
| 1181 | /* | 1181 | /* |
| 1182 | * initialize the per thread extended state: | ||
| 1183 | */ | ||
| 1184 | init_thread_xstate(); | ||
| 1185 | /* | ||
| 1186 | * Should be a barrier for any external CPU state: | 1182 | * Should be a barrier for any external CPU state: |
| 1187 | */ | 1183 | */ |
| 1188 | cpu_init(); | 1184 | cpu_init(); |
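math_state_restore() now passes the task rather than a raw fxsave buffer, so the helper can pick xrstor vs. fxrstor itself. The reworked restore_fpu_checking() presumably looks like the sketch below, inferred from the call-site change (the real definition lives in asm/i387.h):

    static inline int restore_fpu_checking(struct task_struct *tsk)
    {
            /* dispatch on whether this kernel/CPU pair uses xsave */
            if (task_thread_info(tsk)->status & TS_XSAVE)
                    return xrstor_checking(&tsk->thread.xstate->xsave);
            else
                    return fxrstor_checking(&tsk->thread.xstate->fxsave);
    }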
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index af5bdad84604..a9b8560adbc2 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S | |||
| @@ -140,10 +140,10 @@ SECTIONS | |||
| 140 | *(.con_initcall.init) | 140 | *(.con_initcall.init) |
| 141 | __con_initcall_end = .; | 141 | __con_initcall_end = .; |
| 142 | } | 142 | } |
| 143 | .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { | 143 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
| 144 | __x86cpuvendor_start = .; | 144 | __x86_cpu_dev_start = .; |
| 145 | *(.x86cpuvendor.init) | 145 | *(.x86_cpu_dev.init) |
| 146 | __x86cpuvendor_end = .; | 146 | __x86_cpu_dev_end = .; |
| 147 | } | 147 | } |
| 148 | SECURITY_INIT | 148 | SECURITY_INIT |
| 149 | . = ALIGN(4); | 149 | . = ALIGN(4); |
| @@ -180,6 +180,7 @@ SECTIONS | |||
| 180 | . = ALIGN(PAGE_SIZE); | 180 | . = ALIGN(PAGE_SIZE); |
| 181 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { | 181 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { |
| 182 | __per_cpu_start = .; | 182 | __per_cpu_start = .; |
| 183 | *(.data.percpu.page_aligned) | ||
| 183 | *(.data.percpu) | 184 | *(.data.percpu) |
| 184 | *(.data.percpu.shared_aligned) | 185 | *(.data.percpu.shared_aligned) |
| 185 | __per_cpu_end = .; | 186 | __per_cpu_end = .; |
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index 63e5c1a22e88..201e81a91a95 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S | |||
| @@ -168,13 +168,12 @@ SECTIONS | |||
| 168 | *(.con_initcall.init) | 168 | *(.con_initcall.init) |
| 169 | } | 169 | } |
| 170 | __con_initcall_end = .; | 170 | __con_initcall_end = .; |
| 171 | . = ALIGN(16); | 171 | __x86_cpu_dev_start = .; |
| 172 | __x86cpuvendor_start = .; | 172 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
| 173 | .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) { | 173 | *(.x86_cpu_dev.init) |
| 174 | *(.x86cpuvendor.init) | ||
| 175 | } | 174 | } |
| 176 | __x86cpuvendor_end = .; | ||
| 177 | SECURITY_INIT | 175 | SECURITY_INIT |
| 176 | __x86_cpu_dev_end = .; | ||
| 178 | 177 | ||
| 179 | . = ALIGN(8); | 178 | . = ALIGN(8); |
| 180 | .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { | 179 | .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { |
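Both linker scripts now gather per-vendor descriptors into .x86_cpu_dev.init, bracketed by __x86_cpu_dev_start/__x86_cpu_dev_end so generic CPU setup code can walk them at boot. A hedged sketch of the registration idiom implied by the section name (the macro shape is an assumption, not quoted from this patch):

    /* Assumed idiom: drop a pointer to each vendor's cpu_dev into the
     * .x86_cpu_dev.init section; boot code iterates start..end. */
    #define cpu_dev_register(cpu_devX) \
            static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
            __attribute__((__section__(".x86_cpu_dev.init"))) = &cpu_devX

    static const struct cpu_dev intel_cpu_dev = {
            .c_vendor = "Intel",
            .c_ident  = { "GenuineIntel" },
    };
    cpu_dev_register(intel_cpu_dev);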
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c new file mode 100644 index 000000000000..07713d64debe --- /dev/null +++ b/arch/x86/kernel/xsave.c | |||
| @@ -0,0 +1,316 @@ | |||
| 1 | /* | ||
| 2 | * xsave/xrstor support. | ||
| 3 | * | ||
| 4 | * Author: Suresh Siddha <suresh.b.siddha@intel.com> | ||
| 5 | */ | ||
| 6 | #include <linux/bootmem.h> | ||
| 7 | #include <linux/compat.h> | ||
| 8 | #include <asm/i387.h> | ||
| 9 | #ifdef CONFIG_IA32_EMULATION | ||
| 10 | #include <asm/sigcontext32.h> | ||
| 11 | #endif | ||
| 12 | #include <asm/xcr.h> | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Mask of the features supported by both the CPU and the kernel. | ||
| 16 | */ | ||
| 17 | u64 pcntxt_mask; | ||
| 18 | |||
| 19 | struct _fpx_sw_bytes fx_sw_reserved; | ||
| 20 | #ifdef CONFIG_IA32_EMULATION | ||
| 21 | struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
| 22 | #endif | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Check for the presence of extended state information in the | ||
| 26 | * user fpstate pointer in the sigcontext. | ||
| 27 | */ | ||
| 28 | int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
| 29 | void __user *fpstate, | ||
| 30 | struct _fpx_sw_bytes *fx_sw_user) | ||
| 31 | { | ||
| 32 | int min_xstate_size = sizeof(struct i387_fxsave_struct) + | ||
| 33 | sizeof(struct xsave_hdr_struct); | ||
| 34 | unsigned int magic2; | ||
| 35 | int err; | ||
| 36 | |||
| 37 | err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0], | ||
| 38 | sizeof(struct _fpx_sw_bytes)); | ||
| 39 | |||
| 40 | if (err) | ||
| 41 | return err; | ||
| 42 | |||
| 43 | /* | ||
| 44 | * First Magic check failed. | ||
| 45 | */ | ||
| 46 | if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1) | ||
| 47 | return -1; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Check for error scenarios. | ||
| 51 | */ | ||
| 52 | if (fx_sw_user->xstate_size < min_xstate_size || | ||
| 53 | fx_sw_user->xstate_size > xstate_size || | ||
| 54 | fx_sw_user->xstate_size > fx_sw_user->extended_size) | ||
| 55 | return -1; | ||
| 56 | |||
| 57 | err = __get_user(magic2, (__u32 *) (((void *)fpstate) + | ||
| 58 | fx_sw_user->extended_size - | ||
| 59 | FP_XSTATE_MAGIC2_SIZE)); | ||
| 60 | /* | ||
| 61 | * Check for the presence of second magic word at the end of memory | ||
| 62 | * layout. This detects the case where the user just copied the legacy | ||
| 63 | * fpstate layout without copying the extended state information | ||
| 64 | * in the memory layout. | ||
| 65 | */ | ||
| 66 | if (err || magic2 != FP_XSTATE_MAGIC2) | ||
| 67 | return -1; | ||
| 68 | |||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | #ifdef CONFIG_X86_64 | ||
| 73 | /* | ||
| 74 | * Signal frame handlers. | ||
| 75 | */ | ||
| 76 | |||
| 77 | int save_i387_xstate(void __user *buf) | ||
| 78 | { | ||
| 79 | struct task_struct *tsk = current; | ||
| 80 | int err = 0; | ||
| 81 | |||
| 82 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size)) | ||
| 83 | return -EACCES; | ||
| 84 | |||
| 85 | BUG_ON(sig_xstate_size < xstate_size); | ||
| 86 | |||
| 87 | if ((unsigned long)buf % 64) | ||
| 88 | printk("save_i387_xstate: bad fpstate %p\n", buf); | ||
| 89 | |||
| 90 | if (!used_math()) | ||
| 91 | return 0; | ||
| 92 | clear_used_math(); /* trigger finit */ | ||
| 93 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
| 94 | /* | ||
| 95 | * Start with clearing the user buffer. This will present a | ||
| 96 | * clean context for the bytes not touched by the fxsave/xsave. | ||
| 97 | */ | ||
| 98 | __clear_user(buf, sig_xstate_size); | ||
| 99 | |||
| 100 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
| 101 | err = xsave_user(buf); | ||
| 102 | else | ||
| 103 | err = fxsave_user(buf); | ||
| 104 | |||
| 105 | if (err) | ||
| 106 | return err; | ||
| 107 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
| 108 | stts(); | ||
| 109 | } else { | ||
| 110 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | ||
| 111 | xstate_size)) | ||
| 112 | return -1; | ||
| 113 | } | ||
| 114 | |||
| 115 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
| 116 | struct _fpstate __user *fx = buf; | ||
| 117 | |||
| 118 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved, | ||
| 119 | sizeof(struct _fpx_sw_bytes)); | ||
| 120 | |||
| 121 | err |= __put_user(FP_XSTATE_MAGIC2, | ||
| 122 | (__u32 __user *) (buf + sig_xstate_size | ||
| 123 | - FP_XSTATE_MAGIC2_SIZE)); | ||
| 124 | } | ||
| 125 | |||
| 126 | return 1; | ||
| 127 | } | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Restore the extended state if present. Otherwise, restore the FP/SSE | ||
| 131 | * state. | ||
| 132 | */ | ||
| 133 | int restore_user_xstate(void __user *buf) | ||
| 134 | { | ||
| 135 | struct _fpx_sw_bytes fx_sw_user; | ||
| 136 | u64 mask; | ||
| 137 | int err; | ||
| 138 | |||
| 139 | if (((unsigned long)buf % 64) || | ||
| 140 | check_for_xstate(buf, buf, &fx_sw_user)) | ||
| 141 | goto fx_only; | ||
| 142 | |||
| 143 | mask = fx_sw_user.xstate_bv; | ||
| 144 | |||
| 145 | /* | ||
| 146 | * restore the state passed by the user. | ||
| 147 | */ | ||
| 148 | err = xrestore_user(buf, mask); | ||
| 149 | if (err) | ||
| 150 | return err; | ||
| 151 | |||
| 152 | /* | ||
| 153 | * init the state skipped by the user. | ||
| 154 | */ | ||
| 155 | mask = pcntxt_mask & ~mask; | ||
| 156 | |||
| 157 | xrstor_state(init_xstate_buf, mask); | ||
| 158 | |||
| 159 | return 0; | ||
| 160 | |||
| 161 | fx_only: | ||
| 162 | /* | ||
| 163 | * couldn't find the extended state information in the | ||
| 164 | * memory layout. Restore just the FP/SSE and init all | ||
| 165 | * the other extended state. | ||
| 166 | */ | ||
| 167 | xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); | ||
| 168 | return fxrstor_checking((__force struct i387_fxsave_struct *)buf); | ||
| 169 | } | ||
| 170 | |||
| 171 | /* | ||
| 172 | * This restores directly out of user space. Exceptions are handled. | ||
| 173 | */ | ||
| 174 | int restore_i387_xstate(void __user *buf) | ||
| 175 | { | ||
| 176 | struct task_struct *tsk = current; | ||
| 177 | int err = 0; | ||
| 178 | |||
| 179 | if (!buf) { | ||
| 180 | if (used_math()) | ||
| 181 | goto clear; | ||
| 182 | return 0; | ||
| 183 | } else | ||
| 184 | if (!access_ok(VERIFY_READ, buf, sig_xstate_size)) | ||
| 185 | return -EACCES; | ||
| 186 | |||
| 187 | if (!used_math()) { | ||
| 188 | err = init_fpu(tsk); | ||
| 189 | if (err) | ||
| 190 | return err; | ||
| 191 | } | ||
| 192 | |||
| 193 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
| 194 | clts(); | ||
| 195 | task_thread_info(current)->status |= TS_USEDFPU; | ||
| 196 | } | ||
| 197 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
| 198 | err = restore_user_xstate(buf); | ||
| 199 | else | ||
| 200 | err = fxrstor_checking((__force struct i387_fxsave_struct *) | ||
| 201 | buf); | ||
| 202 | if (unlikely(err)) { | ||
| 203 | /* | ||
| 204 | * Encountered an error while doing the restore from the | ||
| 205 | * user buffer, clear the fpu state. | ||
| 206 | */ | ||
| 207 | clear: | ||
| 208 | clear_fpu(tsk); | ||
| 209 | clear_used_math(); | ||
| 210 | } | ||
| 211 | return err; | ||
| 212 | } | ||
| 213 | #endif | ||
| 214 | |||
| 215 | /* | ||
| 216 | * Prepare the SW reserved portion of the fxsave memory layout, indicating | ||
| 217 | * the presence of the extended state information in the memory layout | ||
| 218 | * pointed to by the fpstate pointer in the sigcontext. | ||
| 219 | * This will be saved whenever the FP and extended state context is | ||
| 220 | * saved on the user stack during the signal handler delivery to the user. | ||
| 221 | */ | ||
| 222 | void prepare_fx_sw_frame(void) | ||
| 223 | { | ||
| 224 | int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) + | ||
| 225 | FP_XSTATE_MAGIC2_SIZE; | ||
| 226 | |||
| 227 | sig_xstate_size = sizeof(struct _fpstate) + size_extended; | ||
| 228 | |||
| 229 | #ifdef CONFIG_IA32_EMULATION | ||
| 230 | sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended; | ||
| 231 | #endif | ||
| 232 | |||
| 233 | memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved)); | ||
| 234 | |||
| 235 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; | ||
| 236 | fx_sw_reserved.extended_size = sig_xstate_size; | ||
| 237 | fx_sw_reserved.xstate_bv = pcntxt_mask; | ||
| 238 | fx_sw_reserved.xstate_size = xstate_size; | ||
| 239 | #ifdef CONFIG_IA32_EMULATION | ||
| 240 | memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved, | ||
| 241 | sizeof(struct _fpx_sw_bytes)); | ||
| 242 | fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size; | ||
| 243 | #endif | ||
| 244 | } | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Represents init state for the supported extended state. | ||
| 248 | */ | ||
| 249 | struct xsave_struct *init_xstate_buf; | ||
| 250 | |||
| 251 | #ifdef CONFIG_X86_64 | ||
| 252 | unsigned int sig_xstate_size = sizeof(struct _fpstate); | ||
| 253 | #endif | ||
| 254 | |||
| 255 | /* | ||
| 256 | * Enable the extended processor state save/restore feature | ||
| 257 | */ | ||
| 258 | void __cpuinit xsave_init(void) | ||
| 259 | { | ||
| 260 | if (!cpu_has_xsave) | ||
| 261 | return; | ||
| 262 | |||
| 263 | set_in_cr4(X86_CR4_OSXSAVE); | ||
| 264 | |||
| 265 | /* | ||
| 266 | * Enable all the features that the HW is capable of | ||
| 267 | * and the Linux kernel is aware of. | ||
| 268 | */ | ||
| 269 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
| 270 | } | ||
| 271 | |||
| 272 | /* | ||
| 273 | * Set up the xstate image representing the init state | ||
| 274 | */ | ||
| 275 | void setup_xstate_init(void) | ||
| 276 | { | ||
| 277 | init_xstate_buf = alloc_bootmem(xstate_size); | ||
| 278 | init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; | ||
| 279 | } | ||
| 280 | |||
| 281 | /* | ||
| 282 | * Enable and initialize the xsave feature. | ||
| 283 | */ | ||
| 284 | void __init xsave_cntxt_init(void) | ||
| 285 | { | ||
| 286 | unsigned int eax, ebx, ecx, edx; | ||
| 287 | |||
| 288 | cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); | ||
| 289 | pcntxt_mask = eax + ((u64)edx << 32); | ||
| 290 | |||
| 291 | if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { | ||
| 292 | printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n", | ||
| 293 | pcntxt_mask); | ||
| 294 | BUG(); | ||
| 295 | } | ||
| 296 | |||
| 297 | /* | ||
| 298 | * for now the OS knows only about FP/SSE | ||
| 299 | */ | ||
| 300 | pcntxt_mask = pcntxt_mask & XCNTXT_MASK; | ||
| 301 | xsave_init(); | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Recompute the context size for enabled features | ||
| 305 | */ | ||
| 306 | cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); | ||
| 307 | xstate_size = ebx; | ||
| 308 | |||
| 309 | prepare_fx_sw_frame(); | ||
| 310 | |||
| 311 | setup_xstate_init(); | ||
| 312 | |||
| 313 | printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, " | ||
| 314 | "cntxt size 0x%x\n", | ||
| 315 | pcntxt_mask, xstate_size); | ||
| 316 | } | ||
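The format xsave.c writes is self-describing from user space: magic1 sits in the fxsave image's sw_reserved bytes, magic2 trails the extended image, and rt frames additionally carry UC_FP_XSTATE in uc_flags. A hedged user-space probe built on the fields defined above (illustrative only; error handling elided):

    /* Illustrative only: detect extended state behind sc->fpstate. */
    static int frame_has_xstate(void *fpstate)
    {
            struct _fpstate *fx = fpstate;
            struct _fpx_sw_bytes *sw = (struct _fpx_sw_bytes *)&fx->sw_reserved;
            __u32 *magic2;

            if (sw->magic1 != FP_XSTATE_MAGIC1)
                    return 0;       /* legacy fxsave-only layout */

            magic2 = (__u32 *)((char *)fpstate + sw->extended_size -
                               FP_XSTATE_MAGIC2_SIZE);
            return *magic2 == FP_XSTATE_MAGIC2;
    }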
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 23e8373507ad..17e25995b65b 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h | |||
| @@ -331,21 +331,6 @@ enum vmcs_field { | |||
| 331 | 331 | ||
| 332 | #define AR_RESERVD_MASK 0xfffe0f00 | 332 | #define AR_RESERVD_MASK 0xfffe0f00 |
| 333 | 333 | ||
| 334 | #define MSR_IA32_VMX_BASIC 0x480 | ||
| 335 | #define MSR_IA32_VMX_PINBASED_CTLS 0x481 | ||
| 336 | #define MSR_IA32_VMX_PROCBASED_CTLS 0x482 | ||
| 337 | #define MSR_IA32_VMX_EXIT_CTLS 0x483 | ||
| 338 | #define MSR_IA32_VMX_ENTRY_CTLS 0x484 | ||
| 339 | #define MSR_IA32_VMX_MISC 0x485 | ||
| 340 | #define MSR_IA32_VMX_CR0_FIXED0 0x486 | ||
| 341 | #define MSR_IA32_VMX_CR0_FIXED1 0x487 | ||
| 342 | #define MSR_IA32_VMX_CR4_FIXED0 0x488 | ||
| 343 | #define MSR_IA32_VMX_CR4_FIXED1 0x489 | ||
| 344 | #define MSR_IA32_VMX_VMCS_ENUM 0x48a | ||
| 345 | #define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b | ||
| 346 | #define MSR_IA32_VMX_EPT_VPID_CAP 0x48c | ||
| 347 | |||
| 348 | #define MSR_IA32_FEATURE_CONTROL 0x3a | ||
| 349 | #define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1 | 334 | #define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1 |
| 350 | #define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4 | 335 | #define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4 |
| 351 | 336 | ||
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index aa3fa4119424..55e11aa6d66c 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
| @@ -17,9 +17,6 @@ ifeq ($(CONFIG_X86_32),y) | |||
| 17 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o | 17 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o |
| 18 | else | 18 | else |
| 19 | obj-y += io_64.o iomap_copy_64.o | 19 | obj-y += io_64.o iomap_copy_64.o |
| 20 | |||
| 21 | CFLAGS_csum-partial_64.o := -funroll-loops | ||
| 22 | |||
| 23 | lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o | 20 | lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o |
| 24 | lib-y += thunk_64.o clear_page_64.o copy_page_64.o | 21 | lib-y += thunk_64.o clear_page_64.o copy_page_64.o |
| 25 | lib-y += memmove_64.o memset_64.o | 22 | lib-y += memmove_64.o memset_64.o |
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 24e60944971a..9e68075544f6 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
| @@ -14,6 +14,13 @@ | |||
| 14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
| 15 | #include <asm/mmx.h> | 15 | #include <asm/mmx.h> |
| 16 | 16 | ||
| 17 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
| 18 | /* | ||
| 19 | * Alignment at which movsl is preferred for bulk memory copies. | ||
| 20 | */ | ||
| 21 | struct movsl_mask movsl_mask __read_mostly; | ||
| 22 | #endif | ||
| 23 | |||
| 17 | static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) | 24 | static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) |
| 18 | { | 25 | { |
| 19 | #ifdef CONFIG_X86_INTEL_USERCOPY | 26 | #ifdef CONFIG_X86_INTEL_USERCOPY |
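movsl_mask.mask records the alignment below which rep/movsl stops being profitable on Intel parts (vendor setup code fills it in), and __movsl_is_ok() consults it before a bulk copy. The body is abbreviated in this hunk; it is roughly:

    /* Roughly the full body: allow movsl unless the source and
     * destination alignments differ within the mask. */
    static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
                                    unsigned long n)
    {
    #ifdef CONFIG_X86_INTEL_USERCOPY
            if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
                    return 0;
    #endif
            return 1;
    }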
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 4706de7676b1..6730f4e7c744 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile | |||
| @@ -9,4 +9,3 @@ obj-$(CONFIG_X86_NUMAQ) += numaq.o | |||
| 9 | obj-$(CONFIG_X86_SUMMIT) += summit.o | 9 | obj-$(CONFIG_X86_SUMMIT) += summit.o |
| 10 | obj-$(CONFIG_X86_BIGSMP) += bigsmp.o | 10 | obj-$(CONFIG_X86_BIGSMP) += bigsmp.o |
| 11 | obj-$(CONFIG_X86_ES7000) += es7000.o | 11 | obj-$(CONFIG_X86_ES7000) += es7000.o |
| 12 | obj-$(CONFIG_X86_ES7000) += ../../x86/es7000/ | ||
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index fb30486c82f7..83e13f2d53d2 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
| @@ -88,6 +88,62 @@ early_param("gbpages", parse_direct_gbpages_on); | |||
| 88 | 88 | ||
| 89 | int after_bootmem; | 89 | int after_bootmem; |
| 90 | 90 | ||
| 91 | unsigned long __supported_pte_mask __read_mostly = ~0UL; | ||
| 92 | EXPORT_SYMBOL_GPL(__supported_pte_mask); | ||
| 93 | |||
| 94 | static int do_not_nx __cpuinitdata; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * noexec=on|off | ||
| 98 | * Control non-executable mappings for 64-bit processes. | ||
| 99 | * | ||
| 100 | * on Enable (default) | ||
| 101 | * off Disable | ||
| 102 | */ | ||
| 103 | static int __init nonx_setup(char *str) | ||
| 104 | { | ||
| 105 | if (!str) | ||
| 106 | return -EINVAL; | ||
| 107 | if (!strncmp(str, "on", 2)) { | ||
| 108 | __supported_pte_mask |= _PAGE_NX; | ||
| 109 | do_not_nx = 0; | ||
| 110 | } else if (!strncmp(str, "off", 3)) { | ||
| 111 | do_not_nx = 1; | ||
| 112 | __supported_pte_mask &= ~_PAGE_NX; | ||
| 113 | } | ||
| 114 | return 0; | ||
| 115 | } | ||
| 116 | early_param("noexec", nonx_setup); | ||
| 117 | |||
| 118 | void __cpuinit check_efer(void) | ||
| 119 | { | ||
| 120 | unsigned long efer; | ||
| 121 | |||
| 122 | rdmsrl(MSR_EFER, efer); | ||
| 123 | if (!(efer & EFER_NX) || do_not_nx) | ||
| 124 | __supported_pte_mask &= ~_PAGE_NX; | ||
| 125 | } | ||
| 126 | |||
| 127 | int force_personality32; | ||
| 128 | |||
| 129 | /* | ||
| 130 | * noexec32=on|off | ||
| 131 | * Control non-executable heap for 32-bit processes. | ||
| 132 | * To control the stack too, use noexec=off | ||
| 133 | * | ||
| 134 | * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) | ||
| 135 | * off PROT_READ implies PROT_EXEC | ||
| 136 | */ | ||
| 137 | static int __init nonx32_setup(char *str) | ||
| 138 | { | ||
| 139 | if (!strcmp(str, "on")) | ||
| 140 | force_personality32 &= ~READ_IMPLIES_EXEC; | ||
| 141 | else if (!strcmp(str, "off")) | ||
| 142 | force_personality32 |= READ_IMPLIES_EXEC; | ||
| 143 | return 1; | ||
| 144 | } | ||
| 145 | __setup("noexec32=", nonx32_setup); | ||
| 146 | |||
| 91 | /* | 147 | /* |
| 92 | * NOTE: This function is marked __ref because it calls __init function | 148 | * NOTE: This function is marked __ref because it calls __init function |
| 93 | * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. | 149 | * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. |
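With check_efer() clearing _PAGE_NX from __supported_pte_mask on CPUs (or command lines) without NX, every site that builds a pte is expected to filter its flags through that mask, so an NX request degrades silently to an executable mapping. A minimal sketch of the filtering step (helper name is illustrative):

    /* illustrative: NX is dropped automatically where unsupported */
    static inline unsigned long canon_pte_flags(unsigned long flags)
    {
            return flags & __supported_pte_mask;
    }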
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 8791fc55e715..844df0cbbd3e 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
| 34 | 34 | ||
| 35 | #include <asm/pat.h> | 35 | #include <asm/pat.h> |
| 36 | #include <asm/e820.h> | ||
| 36 | 37 | ||
| 37 | #include "pci.h" | 38 | #include "pci.h" |
| 38 | 39 | ||
| @@ -227,6 +228,8 @@ void __init pcibios_resource_survey(void) | |||
| 227 | pcibios_allocate_bus_resources(&pci_root_buses); | 228 | pcibios_allocate_bus_resources(&pci_root_buses); |
| 228 | pcibios_allocate_resources(0); | 229 | pcibios_allocate_resources(0); |
| 229 | pcibios_allocate_resources(1); | 230 | pcibios_allocate_resources(1); |
| 231 | |||
| 232 | e820_reserve_resources_late(); | ||
| 230 | } | 233 | } |
| 231 | 234 | ||
| 232 | /** | 235 | /** |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index d9635764ce3d..654a2234f8f3 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
| @@ -209,7 +209,7 @@ static int __init pci_mmcfg_check_hostbridge(void) | |||
| 209 | return name != NULL; | 209 | return name != NULL; |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | static void __init pci_mmcfg_insert_resources(unsigned long resource_flags) | 212 | static void __init pci_mmcfg_insert_resources(void) |
| 213 | { | 213 | { |
| 214 | #define PCI_MMCFG_RESOURCE_NAME_LEN 19 | 214 | #define PCI_MMCFG_RESOURCE_NAME_LEN 19 |
| 215 | int i; | 215 | int i; |
| @@ -233,7 +233,7 @@ static void __init pci_mmcfg_insert_resources(unsigned long resource_flags) | |||
| 233 | cfg->pci_segment); | 233 | cfg->pci_segment); |
| 234 | res->start = cfg->address; | 234 | res->start = cfg->address; |
| 235 | res->end = res->start + (num_buses << 20) - 1; | 235 | res->end = res->start + (num_buses << 20) - 1; |
| 236 | res->flags = IORESOURCE_MEM | resource_flags; | 236 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
| 237 | insert_resource(&iomem_resource, res); | 237 | insert_resource(&iomem_resource, res); |
| 238 | names += PCI_MMCFG_RESOURCE_NAME_LEN; | 238 | names += PCI_MMCFG_RESOURCE_NAME_LEN; |
| 239 | } | 239 | } |
| @@ -434,11 +434,9 @@ static void __init __pci_mmcfg_init(int early) | |||
| 434 | (pci_mmcfg_config[0].address == 0)) | 434 | (pci_mmcfg_config[0].address == 0)) |
| 435 | return; | 435 | return; |
| 436 | 436 | ||
| 437 | if (pci_mmcfg_arch_init()) { | 437 | if (pci_mmcfg_arch_init()) |
| 438 | if (known_bridge) | ||
| 439 | pci_mmcfg_insert_resources(IORESOURCE_BUSY); | ||
| 440 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; | 438 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; |
| 441 | } else { | 439 | else { |
| 442 | /* | 440 | /* |
| 443 | * Signal not to attempt to insert mmcfg resources because | 441 | * Signal not to attempt to insert mmcfg resources because |
| 444 | * the architecture mmcfg setup could not initialize. | 442 | * the architecture mmcfg setup could not initialize. |
| @@ -475,7 +473,7 @@ static int __init pci_mmcfg_late_insert_resources(void) | |||
| 475 | * marked so it won't cause request errors when __request_region is | 473 | * marked so it won't cause request errors when __request_region is |
| 476 | * called. | 474 | * called. |
| 477 | */ | 475 | */ |
| 478 | pci_mmcfg_insert_resources(0); | 476 | pci_mmcfg_insert_resources(); |
| 479 | 477 | ||
| 480 | return 0; | 478 | return 0; |
| 481 | } | 479 | } |
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index d3e083dea720..274d06082f48 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/suspend.h> | 11 | #include <linux/suspend.h> |
| 12 | #include <asm/mtrr.h> | 12 | #include <asm/mtrr.h> |
| 13 | #include <asm/mce.h> | 13 | #include <asm/mce.h> |
| 14 | #include <asm/xcr.h> | ||
| 14 | 15 | ||
| 15 | static struct saved_context saved_context; | 16 | static struct saved_context saved_context; |
| 16 | 17 | ||
| @@ -126,6 +127,12 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
| 126 | if (boot_cpu_has(X86_FEATURE_SEP)) | 127 | if (boot_cpu_has(X86_FEATURE_SEP)) |
| 127 | enable_sep_cpu(); | 128 | enable_sep_cpu(); |
| 128 | 129 | ||
| 130 | /* | ||
| 131 | * restore XCR0 for xsave-capable CPUs. | ||
| 132 | */ | ||
| 133 | if (cpu_has_xsave) | ||
| 134 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
| 135 | |||
| 129 | fix_processor_context(); | 136 | fix_processor_context(); |
| 130 | do_fpu_end(); | 137 | do_fpu_end(); |
| 131 | mtrr_ap_init(); | 138 | mtrr_ap_init(); |
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 66bdfb591fd8..e3b6cf70d62c 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
| 16 | #include <asm/mtrr.h> | 16 | #include <asm/mtrr.h> |
| 17 | #include <asm/xcr.h> | ||
| 17 | 18 | ||
| 18 | static void fix_processor_context(void); | 19 | static void fix_processor_context(void); |
| 19 | 20 | ||
| @@ -122,6 +123,12 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
| 122 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); | 123 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); |
| 123 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); | 124 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); |
| 124 | 125 | ||
| 126 | /* | ||
| 127 | * restore XCR0 for xsave-capable CPUs. | ||
| 128 | */ | ||
| 129 | if (cpu_has_xsave) | ||
| 130 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
| 131 | |||
| 125 | fix_processor_context(); | 132 | fix_processor_context(); |
| 126 | 133 | ||
| 127 | do_fpu_end(); | 134 | do_fpu_end(); |
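Both resume paths must rewrite XCR0 because its contents are lost across suspend; the xsetbv() helper from the new <asm/xcr.h> wraps the XSETBV instruction. Since assemblers of this era lack the mnemonic, a plausible wrapper uses the raw opcode bytes (a sketch; the real helper lives in asm/xcr.h):

    static inline void xsetbv(u32 index, u64 value)
    {
            u32 eax = (u32)value;
            u32 edx = (u32)(value >> 32);

            /* xsetbv == 0f 01 d1; index in %ecx, value in %edx:%eax */
            asm volatile(".byte 0x0f,0x01,0xd1"
                         : : "a" (eax), "d" (edx), "c" (index));
    }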
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index cb752ba72466..7440a0dceddb 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
| @@ -385,6 +385,7 @@ | |||
| 385 | . = ALIGN(align); \ | 385 | . = ALIGN(align); \ |
| 386 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 386 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
| 387 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ | 387 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ |
| 388 | *(.data.percpu.page_aligned) \ | ||
| 388 | *(.data.percpu) \ | 389 | *(.data.percpu) \ |
| 389 | *(.data.percpu.shared_aligned) \ | 390 | *(.data.percpu.shared_aligned) \ |
| 390 | } \ | 391 | } \ |
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h index 4761c461d23a..dc604985f2ad 100644 --- a/include/asm-x86/bugs.h +++ b/include/asm-x86/bugs.h | |||
| @@ -2,6 +2,11 @@ | |||
| 2 | #define ASM_X86__BUGS_H | 2 | #define ASM_X86__BUGS_H |
| 3 | 3 | ||
| 4 | extern void check_bugs(void); | 4 | extern void check_bugs(void); |
| 5 | |||
| 6 | #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32) | ||
| 5 | int ppro_with_ram_bug(void); | 7 | int ppro_with_ram_bug(void); |
| 8 | #else | ||
| 9 | static inline int ppro_with_ram_bug(void) { return 0; } | ||
| 10 | #endif | ||
| 6 | 11 | ||
| 7 | #endif /* ASM_X86__BUGS_H */ | 12 | #endif /* ASM_X86__BUGS_H */ |
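This is the usual config-stub idiom: callers keep an unconditional call, and the compiler folds the whole branch away when 32-bit Intel support is compiled out. For instance (caller name hypothetical):

    /* callers stay #ifdef-free; the branch vanishes when stubbed to 0 */
    if (ppro_with_ram_bug())
            apply_ppro_errata_quirk();      /* hypothetical workaround path */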
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 065c6a86ed80..adfeae6586e1 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
| @@ -6,7 +6,13 @@ | |||
| 6 | 6 | ||
| 7 | #include <asm/required-features.h> | 7 | #include <asm/required-features.h> |
| 8 | 8 | ||
| 9 | #define NCAPINTS 8 /* N 32-bit words worth of info */ | 9 | #define NCAPINTS 9 /* N 32-bit words worth of info */ |
| 10 | |||
| 11 | /* | ||
| 12 | * Note: If the comment begins with a quoted string, that string is used | ||
| 13 | * in /proc/cpuinfo instead of the macro name. If the string is "", | ||
| 14 | * this feature bit is not displayed in /proc/cpuinfo at all. | ||
| 15 | */ | ||
| 10 | 16 | ||
| 11 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | 17 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
| 12 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | 18 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
| @@ -14,7 +20,7 @@ | |||
| 14 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | 20 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
| 15 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | 21 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
| 16 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | 22 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
| 17 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ | 23 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
| 18 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | 24 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
| 19 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ | 25 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ |
| 20 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | 26 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
| @@ -23,22 +29,23 @@ | |||
| 23 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | 29 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
| 24 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | 30 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
| 25 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | 31 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
| 26 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ | 32 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
| 33 | /* (plus FCMOVcc, FCOMI with FPU) */ | ||
| 27 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | 34 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
| 28 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 35 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
| 29 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 36 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
| 30 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | 37 | #define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
| 31 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ | 38 | #define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
| 32 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 39 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
| 33 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 40 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
| 34 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | 41 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
| 35 | /* of FPU context), and CR4.OSFXSR available */ | 42 | #define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
| 36 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | 43 | #define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
| 37 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | 44 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
| 38 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ | ||
| 39 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | 45 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
| 40 | #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ | 46 | #define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
| 41 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | 47 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
| 48 | #define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ | ||
| 42 | 49 | ||
| 43 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | 50 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
| 44 | /* Don't duplicate feature flags which are redundant with Intel! */ | 51 | /* Don't duplicate feature flags which are redundant with Intel! */ |
| @@ -46,7 +53,8 @@ | |||
| 46 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | 53 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
| 47 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | 54 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
| 48 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | 55 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
| 49 | #define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ | 56 | #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
| 57 | #define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ | ||
| 50 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ | 58 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
| 51 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | 59 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
| 52 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | 60 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
| @@ -64,54 +72,79 @@ | |||
| 64 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | 72 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
| 65 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | 73 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
| 66 | /* cpu types for specific tunings: */ | 74 | /* cpu types for specific tunings: */ |
| 67 | #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ | 75 | #define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
| 68 | #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ | 76 | #define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
| 69 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | 77 | #define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
| 70 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | 78 | #define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
| 71 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | 79 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
| 72 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 80 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
| 73 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | 81 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
| 74 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 82 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
| 83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | ||
| 75 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | 84 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
| 76 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | 85 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
| 77 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ | 86 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
| 78 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ | 87 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
| 79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | 88 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
| 80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ | 89 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
| 81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | 90 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
| 82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ | 91 | #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
| 83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | 92 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
| 84 | #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ | 93 | #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ |
| 94 | #define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */ | ||
| 85 | 95 | ||
| 86 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 96 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
| 87 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 97 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
| 88 | #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ | 98 | #define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
| 89 | #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ | 99 | #define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
| 100 | #define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ | ||
| 101 | #define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ | ||
| 102 | #define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ | ||
| 103 | #define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ | ||
| 90 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | 104 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
| 91 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | 105 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
| 106 | #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ | ||
| 92 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | 107 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ |
| 108 | #define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ | ||
| 93 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | 109 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
| 94 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | 110 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
| 111 | #define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ | ||
| 95 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ | 112 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
| 113 | #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ | ||
| 114 | #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ | ||
| 96 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ | 115 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ |
| 97 | #define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */ | 116 | #define X86_FEATURE_AES (4*32+25) /* AES instructions */ |
| 117 | #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ | ||
| 118 | #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ | ||
| 119 | #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ | ||
| 98 | 120 | ||
| 99 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | 121 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
| 100 | #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ | 122 | #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
| 101 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ | 123 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
| 102 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ | 124 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
| 103 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ | 125 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
| 104 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ | 126 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
| 105 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ | 127 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
| 106 | #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ | 128 | #define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
| 107 | #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ | 129 | #define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
| 108 | #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ | 130 | #define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
| 109 | #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ | 131 | #define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
| 110 | 132 | ||
| 111 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | 133 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
| 112 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | 134 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
| 113 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ | 135 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
| 114 | #define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */ | 136 | #define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
| 137 | #define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ | ||
| 138 | #define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ | ||
| 139 | #define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ | ||
| 140 | #define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ | ||
| 141 | #define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ | ||
| 142 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ | ||
| 143 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ | ||
| 144 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ | ||
| 145 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ | ||
| 146 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | ||
| 147 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | ||
| 115 | 148 | ||
| 116 | /* | 149 | /* |
| 117 | * Auxiliary flags: Linux defined - For features scattered in various | 150 | * Auxiliary flags: Linux defined - For features scattered in various |
| @@ -119,6 +152,13 @@ | |||
| 119 | */ | 152 | */ |
| 120 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ | 153 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ |
| 121 | 154 | ||
| 155 | /* Virtualization flags: Linux defined */ | ||
| 156 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | ||
| 157 | #define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ | ||
| 158 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ | ||
| 159 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ | ||
| 160 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ | ||
| 161 | |||
| 122 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 162 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
| 123 | 163 | ||
| 124 | #include <linux/bitops.h> | 164 | #include <linux/bitops.h> |
| @@ -152,7 +192,7 @@ extern const char * const x86_power_flags[32]; | |||
| 152 | } while (0) | 192 | } while (0) |
| 153 | #define setup_force_cpu_cap(bit) do { \ | 193 | #define setup_force_cpu_cap(bit) do { \ |
| 154 | set_cpu_cap(&boot_cpu_data, bit); \ | 194 | set_cpu_cap(&boot_cpu_data, bit); \ |
| 155 | clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ | 195 | clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ |
| 156 | } while (0) | 196 | } while (0) |
| 157 | 197 | ||
| 158 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | 198 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
| @@ -193,8 +233,10 @@ extern const char * const x86_power_flags[32]; | |||
| 193 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) | 233 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
| 194 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) | 234 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
| 195 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) | 235 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
| 196 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) | 236 | #define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) |
| 197 | #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) | 237 | #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
| 238 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) | ||
| 239 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) | ||
| 198 | 240 | ||
| 199 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 241 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
| 200 | # define cpu_has_invlpg 1 | 242 | # define cpu_has_invlpg 1 |
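The new word 6 and word 8 bits above are consumed through the capability macros rather than by re-executing CPUID. A minimal sketch (not part of this patch; the initcall name is ours):

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <asm/cpufeature.h>

	static int __init feature_report(void)
	{
		if (cpu_has_xsave)                      /* X86_FEATURE_XSAVE */
			printk(KERN_INFO "XSAVE/XRSTOR supported\n");
		if (boot_cpu_has(X86_FEATURE_SVM))      /* word 6, bit 2 */
			printk(KERN_INFO "AMD SVM supported\n");
		return 0;
	}
	late_initcall(feature_report);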
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h index f52daf176bcb..5abbdec06bd2 100644 --- a/include/asm-x86/e820.h +++ b/include/asm-x86/e820.h | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #define E820_RESERVED 2 | 43 | #define E820_RESERVED 2 |
| 44 | #define E820_ACPI 3 | 44 | #define E820_ACPI 3 |
| 45 | #define E820_NVS 4 | 45 | #define E820_NVS 4 |
| 46 | #define E820_UNUSABLE 5 | ||
| 46 | 47 | ||
| 47 | /* reserved RAM used by kernel itself */ | 48 | /* reserved RAM used by kernel itself */ |
| 48 | #define E820_RESERVED_KERN 128 | 49 | #define E820_RESERVED_KERN 128 |
| @@ -121,6 +122,7 @@ extern void e820_register_active_regions(int nid, unsigned long start_pfn, | |||
| 121 | extern u64 e820_hole_size(u64 start, u64 end); | 122 | extern u64 e820_hole_size(u64 start, u64 end); |
| 122 | extern void finish_e820_parsing(void); | 123 | extern void finish_e820_parsing(void); |
| 123 | extern void e820_reserve_resources(void); | 124 | extern void e820_reserve_resources(void); |
| 125 | extern void e820_reserve_resources_late(void); | ||
| 124 | extern void setup_memory_map(void); | 126 | extern void setup_memory_map(void); |
| 125 | extern char *default_machine_specific_memory_setup(void); | 127 | extern char *default_machine_specific_memory_setup(void); |
| 126 | extern char *machine_specific_memory_setup(void); | 128 | extern char *machine_specific_memory_setup(void); |
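A sketch (not part of this patch; the helper name is hypothetical) of how the new E820_UNUSABLE type can be mapped to a resource name when the e820 table is turned into iomem resources:

	static const char *e820_type_to_string(int e820_type)
	{
		switch (e820_type) {
		case E820_RAM:       return "System RAM";
		case E820_ACPI:      return "ACPI Tables";
		case E820_NVS:       return "ACPI Non-volatile Storage";
		case E820_UNUSABLE:  return "Unusable memory";
		default:             return "reserved";
		}
	}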
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index 1ecdc3ed96e4..9ba862a4eac0 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h | |||
| @@ -19,7 +19,9 @@ | |||
| 19 | #include <asm/sigcontext.h> | 19 | #include <asm/sigcontext.h> |
| 20 | #include <asm/user.h> | 20 | #include <asm/user.h> |
| 21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
| 22 | #include <asm/xsave.h> | ||
| 22 | 23 | ||
| 24 | extern unsigned int sig_xstate_size; | ||
| 23 | extern void fpu_init(void); | 25 | extern void fpu_init(void); |
| 24 | extern void mxcsr_feature_mask_init(void); | 26 | extern void mxcsr_feature_mask_init(void); |
| 25 | extern int init_fpu(struct task_struct *child); | 27 | extern int init_fpu(struct task_struct *child); |
| @@ -31,12 +33,18 @@ extern user_regset_active_fn fpregs_active, xfpregs_active; | |||
| 31 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; | 33 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; |
| 32 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; | 34 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; |
| 33 | 35 | ||
| 36 | extern struct _fpx_sw_bytes fx_sw_reserved; | ||
| 34 | #ifdef CONFIG_IA32_EMULATION | 37 | #ifdef CONFIG_IA32_EMULATION |
| 38 | extern unsigned int sig_xstate_ia32_size; | ||
| 39 | extern struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
| 35 | struct _fpstate_ia32; | 40 | struct _fpstate_ia32; |
| 36 | extern int save_i387_ia32(struct _fpstate_ia32 __user *buf); | 41 | struct _xstate_ia32; |
| 37 | extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf); | 42 | extern int save_i387_xstate_ia32(void __user *buf); |
| 43 | extern int restore_i387_xstate_ia32(void __user *buf); | ||
| 38 | #endif | 44 | #endif |
| 39 | 45 | ||
| 46 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | ||
| 47 | |||
| 40 | #ifdef CONFIG_X86_64 | 48 | #ifdef CONFIG_X86_64 |
| 41 | 49 | ||
| 42 | /* Ignore delayed exceptions from user space */ | 50 | /* Ignore delayed exceptions from user space */ |
| @@ -47,7 +55,7 @@ static inline void tolerant_fwait(void) | |||
| 47 | _ASM_EXTABLE(1b, 2b)); | 55 | _ASM_EXTABLE(1b, 2b)); |
| 48 | } | 56 | } |
| 49 | 57 | ||
| 50 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | 58 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) |
| 51 | { | 59 | { |
| 52 | int err; | 60 | int err; |
| 53 | 61 | ||
| @@ -67,15 +75,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
| 67 | return err; | 75 | return err; |
| 68 | } | 76 | } |
| 69 | 77 | ||
| 70 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | 78 | static inline int restore_fpu_checking(struct task_struct *tsk) |
| 79 | { | ||
| 80 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
| 81 | return xrstor_checking(&tsk->thread.xstate->xsave); | ||
| 82 | else | ||
| 83 | return fxrstor_checking(&tsk->thread.xstate->fxsave); | ||
| 84 | } | ||
| 71 | 85 | ||
| 72 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | 86 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception |
| 73 | is pending. Clear the x87 state here by setting it to fixed | 87 | is pending. Clear the x87 state here by setting it to fixed |
| 74 | values. The kernel data segment can be sometimes 0 and sometimes | 88 | values. The kernel data segment can be sometimes 0 and sometimes |
| 75 | new user value. Both should be ok. | 89 | new user value. Both should be ok. |
| 76 | Use the PDA as safe address because it should be already in L1. */ | 90 | Use the PDA as safe address because it should be already in L1. */ |
| 77 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | 91 | static inline void clear_fpu_state(struct task_struct *tsk) |
| 78 | { | 92 | { |
| 93 | struct xsave_struct *xstate = &tsk->thread.xstate->xsave; | ||
| 94 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * xsave header may indicate the init state of the FP. | ||
| 98 | */ | ||
| 99 | if ((task_thread_info(tsk)->status & TS_XSAVE) && | ||
| 100 | !(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
| 101 | return; | ||
| 102 | |||
| 79 | if (unlikely(fx->swd & X87_FSW_ES)) | 103 | if (unlikely(fx->swd & X87_FSW_ES)) |
| 80 | asm volatile("fnclex"); | 104 | asm volatile("fnclex"); |
| 81 | alternative_input(ASM_NOP8 ASM_NOP2, | 105 | alternative_input(ASM_NOP8 ASM_NOP2, |
| @@ -84,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | |||
| 84 | X86_FEATURE_FXSAVE_LEAK); | 108 | X86_FEATURE_FXSAVE_LEAK); |
| 85 | } | 109 | } |
| 86 | 110 | ||
| 87 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | 111 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) |
| 88 | { | 112 | { |
| 89 | int err; | 113 | int err; |
| 90 | 114 | ||
| @@ -108,7 +132,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | |||
| 108 | return err; | 132 | return err; |
| 109 | } | 133 | } |
| 110 | 134 | ||
| 111 | static inline void __save_init_fpu(struct task_struct *tsk) | 135 | static inline void fxsave(struct task_struct *tsk) |
| 112 | { | 136 | { |
| 113 | /* Using "rex64; fxsave %0" is broken because, if the memory operand | 137 | /* Using "rex64; fxsave %0" is broken because, if the memory operand |
| 114 | uses any extended registers for addressing, a second REX prefix | 138 | uses any extended registers for addressing, a second REX prefix |
| @@ -133,7 +157,16 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
| 133 | : "=m" (tsk->thread.xstate->fxsave) | 157 | : "=m" (tsk->thread.xstate->fxsave) |
| 134 | : "cdaSDb" (&tsk->thread.xstate->fxsave)); | 158 | : "cdaSDb" (&tsk->thread.xstate->fxsave)); |
| 135 | #endif | 159 | #endif |
| 136 | clear_fpu_state(&tsk->thread.xstate->fxsave); | 160 | } |
| 161 | |||
| 162 | static inline void __save_init_fpu(struct task_struct *tsk) | ||
| 163 | { | ||
| 164 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
| 165 | xsave(tsk); | ||
| 166 | else | ||
| 167 | fxsave(tsk); | ||
| 168 | |||
| 169 | clear_fpu_state(tsk); | ||
| 137 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 170 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
| 138 | } | 171 | } |
| 139 | 172 | ||
| @@ -148,6 +181,10 @@ static inline void tolerant_fwait(void) | |||
| 148 | 181 | ||
| 149 | static inline void restore_fpu(struct task_struct *tsk) | 182 | static inline void restore_fpu(struct task_struct *tsk) |
| 150 | { | 183 | { |
| 184 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
| 185 | xrstor_checking(&tsk->thread.xstate->xsave); | ||
| 186 | return; | ||
| 187 | } | ||
| 151 | /* | 188 | /* |
| 152 | * The "nop" is needed to make the instructions the same | 189 | * The "nop" is needed to make the instructions the same |
| 153 | * length. | 190 | * length. |
| @@ -173,6 +210,27 @@ static inline void restore_fpu(struct task_struct *tsk) | |||
| 173 | */ | 210 | */ |
| 174 | static inline void __save_init_fpu(struct task_struct *tsk) | 211 | static inline void __save_init_fpu(struct task_struct *tsk) |
| 175 | { | 212 | { |
| 213 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
| 214 | struct xsave_struct *xstate = &tsk->thread.xstate->xsave; | ||
| 215 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | ||
| 216 | |||
| 217 | xsave(tsk); | ||
| 218 | |||
| 219 | /* | ||
| 220 | * xsave header may indicate the init state of the FP. | ||
| 221 | */ | ||
| 222 | if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
| 223 | goto end; | ||
| 224 | |||
| 225 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
| 226 | asm volatile("fnclex"); | ||
| 227 | |||
| 228 | /* | ||
| 229 | * we can do a simple return here or be paranoid :) | ||
| 230 | */ | ||
| 231 | goto clear_state; | ||
| 232 | } | ||
| 233 | |||
| 176 | /* Use more nops than strictly needed in case the compiler | 234 | /* Use more nops than strictly needed in case the compiler |
| 177 | varies code */ | 235 | varies code */ |
| 178 | alternative_input( | 236 | alternative_input( |
| @@ -182,6 +240,7 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
| 182 | X86_FEATURE_FXSR, | 240 | X86_FEATURE_FXSR, |
| 183 | [fx] "m" (tsk->thread.xstate->fxsave), | 241 | [fx] "m" (tsk->thread.xstate->fxsave), |
| 184 | [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); | 242 | [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); |
| 243 | clear_state: | ||
| 185 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 244 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
| 186 | is pending. Clear the x87 state here by setting it to fixed | 245 | is pending. Clear the x87 state here by setting it to fixed |
| 187 | values. safe_address is a random variable that should be in L1 */ | 246 | values. safe_address is a random variable that should be in L1 */ |
| @@ -191,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
| 191 | "fildl %[addr]", /* set F?P to defined value */ | 250 | "fildl %[addr]", /* set F?P to defined value */ |
| 192 | X86_FEATURE_FXSAVE_LEAK, | 251 | X86_FEATURE_FXSAVE_LEAK, |
| 193 | [addr] "m" (safe_address)); | 252 | [addr] "m" (safe_address)); |
| 253 | end: | ||
| 194 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 254 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
| 195 | } | 255 | } |
| 196 | 256 | ||
| 257 | #endif /* CONFIG_X86_64 */ | ||
| 258 | |||
| 197 | /* | 259 | /* |
| 198 | * Signal frame handlers... | 260 | * Signal frame handlers... |
| 199 | */ | 261 | */ |
| 200 | extern int save_i387(struct _fpstate __user *buf); | 262 | extern int save_i387_xstate(void __user *buf); |
| 201 | extern int restore_i387(struct _fpstate __user *buf); | 263 | extern int restore_i387_xstate(void __user *buf); |
| 202 | |||
| 203 | #endif /* CONFIG_X86_64 */ | ||
| 204 | 264 | ||
| 205 | static inline void __unlazy_fpu(struct task_struct *tsk) | 265 | static inline void __unlazy_fpu(struct task_struct *tsk) |
| 206 | { | 266 | { |
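Callers of the save/restore helpers are unchanged by the TS_XSAVE dispatch; the caller-side pattern, sketched after __unlazy_fpu() (assuming stts() from asm/system.h):

	static inline void unlazy_fpu_sketch(struct task_struct *tsk)
	{
		if (task_thread_info(tsk)->status & TS_USEDFPU) {
			__save_init_fpu(tsk);   /* xsave or fxsave; clears TS_USEDFPU */
			stts();                 /* set CR0.TS so the next FP use traps */
		}
	}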
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index 3052f058ab06..0bb43301a202 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
| @@ -176,6 +176,7 @@ | |||
| 176 | #define MSR_IA32_TSC 0x00000010 | 176 | #define MSR_IA32_TSC 0x00000010 |
| 177 | #define MSR_IA32_PLATFORM_ID 0x00000017 | 177 | #define MSR_IA32_PLATFORM_ID 0x00000017 |
| 178 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a | 178 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a |
| 179 | #define MSR_IA32_FEATURE_CONTROL 0x0000003a | ||
| 179 | 180 | ||
| 180 | #define MSR_IA32_APICBASE 0x0000001b | 181 | #define MSR_IA32_APICBASE 0x0000001b |
| 181 | #define MSR_IA32_APICBASE_BSP (1<<8) | 182 | #define MSR_IA32_APICBASE_BSP (1<<8) |
| @@ -310,4 +311,19 @@ | |||
| 310 | /* Geode defined MSRs */ | 311 | /* Geode defined MSRs */ |
| 311 | #define MSR_GEODE_BUSCONT_CONF0 0x00001900 | 312 | #define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
| 312 | 313 | ||
| 314 | /* Intel VT MSRs */ | ||
| 315 | #define MSR_IA32_VMX_BASIC 0x00000480 | ||
| 316 | #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 | ||
| 317 | #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 | ||
| 318 | #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 | ||
| 319 | #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 | ||
| 320 | #define MSR_IA32_VMX_MISC 0x00000485 | ||
| 321 | #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 | ||
| 322 | #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 | ||
| 323 | #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 | ||
| 324 | #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 | ||
| 325 | #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a | ||
| 326 | #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b | ||
| 327 | #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c | ||
| 328 | |||
| 313 | #endif /* ASM_X86__MSR_INDEX_H */ | 329 | #endif /* ASM_X86__MSR_INDEX_H */ |
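These MSRs exist only when CPUID advertises VMX, so reads are guarded; a sketch (not part of this patch) decoding MSR_IA32_VMX_BASIC:

	#include <asm/msr.h>

	static void vmx_basic_report(void)
	{
		u32 lo, hi;

		if (!boot_cpu_has(X86_FEATURE_VMX))
			return;                 /* rdmsr would #GP otherwise */
		rdmsr(MSR_IA32_VMX_BASIC, lo, hi);
		/* lo[30:0] = VMCS revision id; hi[12:0] = VMCS region size */
		printk(KERN_INFO "VMX: revision %u, VMCS size %u bytes\n",
		       lo & 0x7fffffff, hi & 0x1fff);
	}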
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h index 97568ada1f97..1198f2a0e42c 100644 --- a/include/asm-x86/processor-cyrix.h +++ b/include/asm-x86/processor-cyrix.h | |||
| @@ -28,3 +28,11 @@ static inline void setCx86(u8 reg, u8 data) | |||
| 28 | outb(reg, 0x22); | 28 | outb(reg, 0x22); |
| 29 | outb(data, 0x23); | 29 | outb(data, 0x23); |
| 30 | } | 30 | } |
| 31 | |||
| 32 | #define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); }) | ||
| 33 | |||
| 34 | #define setCx86_old(reg, data) do { \ | ||
| 35 | outb((reg), 0x22); \ | ||
| 36 | outb((data), 0x23); \ | ||
| 37 | } while (0) | ||
| 38 | |||
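Typical use of the 0x22/0x23 accessors, sketched after arch/x86/kernel/cpu/cyrix.c (CX86_CCR3 is the 0xc3 register index from the Cyrix documentation; not part of this patch):

	static void cyrix_ccr_demo(void)
	{
		u8 ccr3 = getCx86(CX86_CCR3);

		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);  /* set MAPEN */
		/* CCR4-CCR7 are now reachable through ports 0x22/0x23 */
		setCx86(CX86_CCR3, ccr3);                  /* restore MAPEN */
	}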
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h index 5dd79774f693..dc5f0712f9fa 100644 --- a/include/asm-x86/processor-flags.h +++ b/include/asm-x86/processor-flags.h | |||
| @@ -59,6 +59,7 @@ | |||
| 59 | #define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ | 59 | #define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ |
| 60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ | 60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ |
| 61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ | 61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ |
| 62 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | ||
| 62 | 63 | ||
| 63 | /* | 64 | /* |
| 64 | * x86-64 Task Priority Register, CR8 | 65 | * x86-64 Task Priority Register, CR8 |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 5eaf9bf0a623..c7d35464a4bb 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
| @@ -76,11 +76,11 @@ struct cpuinfo_x86 { | |||
| 76 | int x86_tlbsize; | 76 | int x86_tlbsize; |
| 77 | __u8 x86_virt_bits; | 77 | __u8 x86_virt_bits; |
| 78 | __u8 x86_phys_bits; | 78 | __u8 x86_phys_bits; |
| 79 | #endif | ||
| 79 | /* CPUID returned core id bits: */ | 80 | /* CPUID returned core id bits: */ |
| 80 | __u8 x86_coreid_bits; | 81 | __u8 x86_coreid_bits; |
| 81 | /* Max extended CPUID function supported: */ | 82 | /* Max extended CPUID function supported: */ |
| 82 | __u32 extended_cpuid_level; | 83 | __u32 extended_cpuid_level; |
| 83 | #endif | ||
| 84 | /* Maximum supported CPUID level, -1=no CPUID: */ | 84 | /* Maximum supported CPUID level, -1=no CPUID: */ |
| 85 | int cpuid_level; | 85 | int cpuid_level; |
| 86 | __u32 x86_capability[NCAPINTS]; | 86 | __u32 x86_capability[NCAPINTS]; |
| @@ -166,11 +166,8 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | |||
| 166 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 166 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); |
| 167 | extern unsigned short num_cache_leaves; | 167 | extern unsigned short num_cache_leaves; |
| 168 | 168 | ||
| 169 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) | 169 | extern void detect_extended_topology(struct cpuinfo_x86 *c); |
| 170 | extern void detect_ht(struct cpuinfo_x86 *c); | 170 | extern void detect_ht(struct cpuinfo_x86 *c); |
| 171 | #else | ||
| 172 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
| 173 | #endif | ||
| 174 | 171 | ||
| 175 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | 172 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, |
| 176 | unsigned int *ecx, unsigned int *edx) | 173 | unsigned int *ecx, unsigned int *edx) |
| @@ -327,7 +324,12 @@ struct i387_fxsave_struct { | |||
| 327 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ | 324 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ |
| 328 | u32 xmm_space[64]; | 325 | u32 xmm_space[64]; |
| 329 | 326 | ||
| 330 | u32 padding[24]; | 327 | u32 padding[12]; |
| 328 | |||
| 329 | union { | ||
| 330 | u32 padding1[12]; | ||
| 331 | u32 sw_reserved[12]; | ||
| 332 | }; | ||
| 331 | 333 | ||
| 332 | } __attribute__((aligned(16))); | 334 | } __attribute__((aligned(16))); |
| 333 | 335 | ||
| @@ -351,10 +353,23 @@ struct i387_soft_struct { | |||
| 351 | u32 entry_eip; | 353 | u32 entry_eip; |
| 352 | }; | 354 | }; |
| 353 | 355 | ||
| 356 | struct xsave_hdr_struct { | ||
| 357 | u64 xstate_bv; | ||
| 358 | u64 reserved1[2]; | ||
| 359 | u64 reserved2[5]; | ||
| 360 | } __attribute__((packed)); | ||
| 361 | |||
| 362 | struct xsave_struct { | ||
| 363 | struct i387_fxsave_struct i387; | ||
| 364 | struct xsave_hdr_struct xsave_hdr; | ||
| 365 | /* new processor state extensions will go here */ | ||
| 366 | } __attribute__ ((packed, aligned (64))); | ||
| 367 | |||
| 354 | union thread_xstate { | 368 | union thread_xstate { |
| 355 | struct i387_fsave_struct fsave; | 369 | struct i387_fsave_struct fsave; |
| 356 | struct i387_fxsave_struct fxsave; | 370 | struct i387_fxsave_struct fxsave; |
| 357 | struct i387_soft_struct soft; | 371 | struct i387_soft_struct soft; |
| 372 | struct xsave_struct xsave; | ||
| 358 | }; | 373 | }; |
| 359 | 374 | ||
| 360 | #ifdef CONFIG_X86_64 | 375 | #ifdef CONFIG_X86_64 |
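The xstate_bv bitmap in xsave_hdr records which components the processor actually wrote on the last xsave; a sketch (not part of this patch, assuming XSTATE_FP from asm/xsave.h; the function name is ours):

	static int fp_in_init_state(struct task_struct *tsk)
	{
		struct xsave_struct *xs = &tsk->thread.xstate->xsave;

		/* a clear bit means that component is in its init configuration */
		return !(xs->xsave_hdr.xstate_bv & XSTATE_FP);
	}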
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index 24879c85b291..ee813f4fe5d5 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h | |||
| @@ -4,6 +4,40 @@ | |||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | #include <asm/types.h> | 5 | #include <asm/types.h> |
| 6 | 6 | ||
| 7 | #define FP_XSTATE_MAGIC1 0x46505853U | ||
| 8 | #define FP_XSTATE_MAGIC2 0x46505845U | ||
| 9 | #define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) | ||
| 10 | |||
| 11 | /* | ||
| 12 | * bytes 464..511 in the current 512-byte layout of the fxsave/fxrstor frame | ||
| 13 | * are reserved for SW usage. On CPUs supporting xsave/xrstor, these bytes | ||
| 14 | * are used to extend the fpstate pointer in the sigcontext, which now | ||
| 15 | * includes the extended state information along with fpstate information. | ||
| 16 | * | ||
| 17 | * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved | ||
| 18 | * area and FP_XSTATE_MAGIC2 at the end of memory layout | ||
| 19 | * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the | ||
| 20 | * extended state information in the memory layout pointed by the fpstate | ||
| 21 | * extended state information in the memory layout pointed to by the fpstate | ||
| 22 | */ | ||
| 23 | struct _fpx_sw_bytes { | ||
| 24 | __u32 magic1; /* FP_XSTATE_MAGIC1 */ | ||
| 25 | __u32 extended_size; /* total size of the layout referred to by | ||
| 26 | * fpstate pointer in the sigcontext. | ||
| 27 | */ | ||
| 28 | __u64 xstate_bv; | ||
| 29 | /* feature bit mask (including fp/sse/extended | ||
| 30 | * state) that is present in the memory | ||
| 31 | * layout. | ||
| 32 | */ | ||
| 33 | __u32 xstate_size; /* actual xsave state size, based on the | ||
| 34 | * features saved in the layout. | ||
| 35 | * 'extended_size' will be greater than | ||
| 36 | * 'xstate_size'. | ||
| 37 | */ | ||
| 38 | __u32 padding[7]; /* for future use. */ | ||
| 39 | }; | ||
| 40 | |||
| 7 | #ifdef __i386__ | 41 | #ifdef __i386__ |
| 8 | /* | 42 | /* |
| 9 | * As documented in the iBCS2 standard.. | 43 | * As documented in the iBCS2 standard.. |
| @@ -53,7 +87,13 @@ struct _fpstate { | |||
| 53 | unsigned long reserved; | 87 | unsigned long reserved; |
| 54 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ | 88 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
| 55 | struct _xmmreg _xmm[8]; | 89 | struct _xmmreg _xmm[8]; |
| 56 | unsigned long padding[56]; | 90 | unsigned long padding1[44]; |
| 91 | |||
| 92 | union { | ||
| 93 | unsigned long padding2[12]; | ||
| 94 | struct _fpx_sw_bytes sw_reserved; /* represents the extended | ||
| 95 | * state info */ | ||
| 96 | }; | ||
| 57 | }; | 97 | }; |
| 58 | 98 | ||
| 59 | #define X86_FXSR_MAGIC 0x0000 | 99 | #define X86_FXSR_MAGIC 0x0000 |
| @@ -79,7 +119,15 @@ struct sigcontext { | |||
| 79 | unsigned long flags; | 119 | unsigned long flags; |
| 80 | unsigned long sp_at_signal; | 120 | unsigned long sp_at_signal; |
| 81 | unsigned short ss, __ssh; | 121 | unsigned short ss, __ssh; |
| 82 | struct _fpstate __user *fpstate; | 122 | |
| 123 | /* | ||
| 124 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | ||
| 125 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | ||
| 126 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | ||
| 127 | * of the extended memory layout. See comments at the definition of | ||
| 128 | * (struct _fpx_sw_bytes) | ||
| 129 | */ | ||
| 130 | void __user *fpstate; /* zero when no FPU/extended context */ | ||
| 83 | unsigned long oldmask; | 131 | unsigned long oldmask; |
| 84 | unsigned long cr2; | 132 | unsigned long cr2; |
| 85 | }; | 133 | }; |
| @@ -130,7 +178,12 @@ struct _fpstate { | |||
| 130 | __u32 mxcsr_mask; | 178 | __u32 mxcsr_mask; |
| 131 | __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ | 179 | __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ |
| 132 | __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ | 180 | __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ |
| 133 | __u32 reserved2[24]; | 181 | __u32 reserved2[12]; |
| 182 | union { | ||
| 183 | __u32 reserved3[12]; | ||
| 184 | struct _fpx_sw_bytes sw_reserved; /* represents the extended | ||
| 185 | * state information */ | ||
| 186 | }; | ||
| 134 | }; | 187 | }; |
| 135 | 188 | ||
| 136 | #ifdef __KERNEL__ | 189 | #ifdef __KERNEL__ |
| @@ -161,7 +214,15 @@ struct sigcontext { | |||
| 161 | unsigned long trapno; | 214 | unsigned long trapno; |
| 162 | unsigned long oldmask; | 215 | unsigned long oldmask; |
| 163 | unsigned long cr2; | 216 | unsigned long cr2; |
| 164 | struct _fpstate __user *fpstate; /* zero when no FPU context */ | 217 | |
| 218 | /* | ||
| 219 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | ||
| 220 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | ||
| 221 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | ||
| 222 | * of the extended memory layout. See comments at the definition of | ||
| 223 | * (struct _fpx_sw_bytes) | ||
| 224 | */ | ||
| 225 | void __user *fpstate; /* zero when no FPU/extended context */ | ||
| 165 | unsigned long reserved1[8]; | 226 | unsigned long reserved1[8]; |
| 166 | }; | 227 | }; |
| 167 | #else /* __KERNEL__ */ | 228 | #else /* __KERNEL__ */ |
| @@ -202,4 +263,22 @@ struct sigcontext { | |||
| 202 | 263 | ||
| 203 | #endif /* !__i386__ */ | 264 | #endif /* !__i386__ */ |
| 204 | 265 | ||
| 266 | struct _xsave_hdr { | ||
| 267 | __u64 xstate_bv; | ||
| 268 | __u64 reserved1[2]; | ||
| 269 | __u64 reserved2[5]; | ||
| 270 | }; | ||
| 271 | |||
| 272 | /* | ||
| 273 | * Extended state pointed to by the fpstate pointer in the sigcontext. | ||
| 274 | * In addition to the fpstate, information encoded in the xstate_hdr | ||
| 275 | * indicates the presence of other extended state information | ||
| 276 | * supported by the processor and OS. | ||
| 277 | */ | ||
| 278 | struct _xstate { | ||
| 279 | struct _fpstate fpstate; | ||
| 280 | struct _xsave_hdr xstate_hdr; | ||
| 281 | /* new processor state extensions go here */ | ||
| 282 | }; | ||
| 283 | |||
| 205 | #endif /* ASM_X86__SIGCONTEXT_H */ | 284 | #endif /* ASM_X86__SIGCONTEXT_H */ |
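A userspace sketch (not part of this patch) of the framing check described above, applied to the fpstate pointer taken from a sigcontext:

	#include <string.h>
	#include <asm/sigcontext.h>

	/* returns nonzero if fp carries the extended (xsave) layout */
	static int fpstate_is_extended(const struct _fpstate *fp)
	{
		const struct _fpx_sw_bytes *sw = &fp->sw_reserved;
		__u32 magic2;

		if (sw->magic1 != FP_XSTATE_MAGIC1)
			return 0;       /* legacy fxsave-only frame */
		memcpy(&magic2, (const char *)fp + sw->extended_size
				- FP_XSTATE_MAGIC2_SIZE, sizeof(magic2));
		return magic2 == FP_XSTATE_MAGIC2;
	}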
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 4e2ec732dd01..8c347032c2f2 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h | |||
| @@ -40,7 +40,11 @@ struct _fpstate_ia32 { | |||
| 40 | __u32 reserved; | 40 | __u32 reserved; |
| 41 | struct _fpxreg _fxsr_st[8]; | 41 | struct _fpxreg _fxsr_st[8]; |
| 42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ | 42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ |
| 43 | __u32 padding[56]; | 43 | __u32 padding[44]; |
| 44 | union { | ||
| 45 | __u32 padding2[12]; | ||
| 46 | struct _fpx_sw_bytes sw_reserved; | ||
| 47 | }; | ||
| 44 | }; | 48 | }; |
| 45 | 49 | ||
| 46 | struct sigcontext_ia32 { | 50 | struct sigcontext_ia32 { |
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index 4db0066a3a35..3f4e52bb77f5 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h | |||
| @@ -241,6 +241,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
| 241 | #define TS_POLLING 0x0004 /* true if in idle loop | 241 | #define TS_POLLING 0x0004 /* true if in idle loop |
| 242 | and not sleeping */ | 242 | and not sleeping */ |
| 243 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | 243 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ |
| 244 | #define TS_XSAVE 0x0010 /* Use xsave/xrstor */ | ||
| 244 | 245 | ||
| 245 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 246 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
| 246 | 247 | ||
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h index 9948dd328084..89eaa5456a7e 100644 --- a/include/asm-x86/ucontext.h +++ b/include/asm-x86/ucontext.h | |||
| @@ -1,6 +1,12 @@ | |||
| 1 | #ifndef ASM_X86__UCONTEXT_H | 1 | #ifndef ASM_X86__UCONTEXT_H |
| 2 | #define ASM_X86__UCONTEXT_H | 2 | #define ASM_X86__UCONTEXT_H |
| 3 | 3 | ||
| 4 | #define UC_FP_XSTATE 0x1 /* indicates the presence of extended state | ||
| 5 | * information in the memory layout pointed to | ||
| 6 | * by the fpstate pointer in the ucontext's | ||
| 7 | * sigcontext struct (uc_mcontext). | ||
| 8 | */ | ||
| 9 | |||
| 4 | struct ucontext { | 10 | struct ucontext { |
| 5 | unsigned long uc_flags; | 11 | unsigned long uc_flags; |
| 6 | struct ucontext *uc_link; | 12 | struct ucontext *uc_link; |
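From a SA_SIGINFO handler the flag is checked before interpreting the fpstate pointer; a userspace sketch (not part of this patch):

	#include <signal.h>
	#include <ucontext.h>

	#ifndef UC_FP_XSTATE
	#define UC_FP_XSTATE 0x1        /* from asm/ucontext.h */
	#endif

	static void handler(int sig, siginfo_t *info, void *ctx)
	{
		const ucontext_t *uc = ctx;

		if (uc->uc_flags & UC_FP_XSTATE) {
			/* fpstate points to a struct _xstate (fpstate + xsave hdr) */
		} else {
			/* legacy struct _fpstate, or NULL if no FP context */
		}
	}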
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h new file mode 100644 index 000000000000..f2cba4e79a23 --- /dev/null +++ b/include/asm-x86/xcr.h | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
| 2 | * | ||
| 3 | * Copyright 2008 rPath, Inc. - All Rights Reserved | ||
| 4 | * | ||
| 5 | * This file is part of the Linux kernel, and is made available under | ||
| 6 | * the terms of the GNU General Public License version 2 or (at your | ||
| 7 | * option) any later version; incorporated herein by reference. | ||
| 8 | * | ||
| 9 | * ----------------------------------------------------------------------- */ | ||
| 10 | |||
| 11 | /* | ||
| 12 | * asm-x86/xcr.h | ||
| 13 | * | ||
| 14 | * Definitions for the eXtended Control Register instructions | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _ASM_X86_XCR_H | ||
| 18 | #define _ASM_X86_XCR_H | ||
| 19 | |||
| 20 | #define XCR_XFEATURE_ENABLED_MASK 0x00000000 | ||
| 21 | |||
| 22 | #ifdef __KERNEL__ | ||
| 23 | # ifndef __ASSEMBLY__ | ||
| 24 | |||
| 25 | #include <linux/types.h> | ||
| 26 | |||
| 27 | static inline u64 xgetbv(u32 index) | ||
| 28 | { | ||
| 29 | u32 eax, edx; | ||
| 30 | |||
| 31 | asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ | ||
| 32 | : "=a" (eax), "=d" (edx) | ||
| 33 | : "c" (index)); | ||
| 34 | return eax + ((u64)edx << 32); | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline void xsetbv(u32 index, u64 value) | ||
| 38 | { | ||
| 39 | u32 eax = value; | ||
| 40 | u32 edx = value >> 32; | ||
| 41 | |||
| 42 | asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */ | ||
| 43 | : : "a" (eax), "d" (edx), "c" (index)); | ||
| 44 | } | ||
| 45 | |||
| 46 | # endif /* __ASSEMBLY__ */ | ||
| 47 | #endif /* __KERNEL__ */ | ||
| 48 | |||
| 49 | #endif /* _ASM_X86_XCR_H */ | ||
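xgetbv/xsetbv raise #UD until CR4.OSXSAVE is set, so the two pieces are used together; a sketch (not part of this patch), assuming set_in_cr4() from asm/processor.h:

	#include <asm/processor-flags.h>
	#include <asm/xcr.h>

	static void enable_xstate(u64 mask)
	{
		set_in_cr4(X86_CR4_OSXSAVE);             /* unlock xgetbv/xsetbv */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, mask); /* XCR0 = enabled features */
	}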
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h new file mode 100644 index 000000000000..08e9a1ac07a9 --- /dev/null +++ b/include/asm-x86/xsave.h | |||
| @@ -0,0 +1,118 @@ | |||
| 1 | #ifndef __ASM_X86_XSAVE_H | ||
| 2 | #define __ASM_X86_XSAVE_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | #include <asm/processor.h> | ||
| 6 | #include <asm/i387.h> | ||
| 7 | |||
| 8 | #define XSTATE_FP 0x1 | ||
| 9 | #define XSTATE_SSE 0x2 | ||
| 10 | |||
| 11 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) | ||
| 12 | |||
| 13 | #define FXSAVE_SIZE 512 | ||
| 14 | |||
| 15 | /* | ||
| 16 | * These are the features that the OS can handle currently. | ||
| 17 | */ | ||
| 18 | #define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE) | ||
| 19 | |||
| 20 | #ifdef CONFIG_X86_64 | ||
| 21 | #define REX_PREFIX "0x48, " | ||
| 22 | #else | ||
| 23 | #define REX_PREFIX | ||
| 24 | #endif | ||
| 25 | |||
| 26 | extern unsigned int xstate_size; | ||
| 27 | extern u64 pcntxt_mask; | ||
| 28 | extern struct xsave_struct *init_xstate_buf; | ||
| 29 | |||
| 30 | extern void xsave_cntxt_init(void); | ||
| 31 | extern void xsave_init(void); | ||
| 32 | extern int init_fpu(struct task_struct *child); | ||
| 33 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
| 34 | void __user *fpstate, | ||
| 35 | struct _fpx_sw_bytes *sw); | ||
| 36 | |||
| 37 | static inline int xrstor_checking(struct xsave_struct *fx) | ||
| 38 | { | ||
| 39 | int err; | ||
| 40 | |||
| 41 | asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | ||
| 42 | "2:\n" | ||
| 43 | ".section .fixup,\"ax\"\n" | ||
| 44 | "3: movl $-1,%[err]\n" | ||
| 45 | " jmp 2b\n" | ||
| 46 | ".previous\n" | ||
| 47 | _ASM_EXTABLE(1b, 3b) | ||
| 48 | : [err] "=r" (err) | ||
| 49 | : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0) | ||
| 50 | : "memory"); | ||
| 51 | |||
| 52 | return err; | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline int xsave_user(struct xsave_struct __user *buf) | ||
| 56 | { | ||
| 57 | int err; | ||
| 58 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" | ||
| 59 | "2:\n" | ||
| 60 | ".section .fixup,\"ax\"\n" | ||
| 61 | "3: movl $-1,%[err]\n" | ||
| 62 | " jmp 2b\n" | ||
| 63 | ".previous\n" | ||
| 64 | ".section __ex_table,\"a\"\n" | ||
| 65 | _ASM_ALIGN "\n" | ||
| 66 | _ASM_PTR "1b,3b\n" | ||
| 67 | ".previous" | ||
| 68 | : [err] "=r" (err) | ||
| 69 | : "D" (buf), "a" (-1), "d" (-1), "0" (0) | ||
| 70 | : "memory"); | ||
| 71 | if (unlikely(err) && __clear_user(buf, xstate_size)) | ||
| 72 | err = -EFAULT; | ||
| 73 | /* No need to clear here because the caller clears USED_MATH */ | ||
| 74 | return err; | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) | ||
| 78 | { | ||
| 79 | int err; | ||
| 80 | struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); | ||
| 81 | u32 lmask = mask; | ||
| 82 | u32 hmask = mask >> 32; | ||
| 83 | |||
| 84 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" | ||
| 85 | "2:\n" | ||
| 86 | ".section .fixup,\"ax\"\n" | ||
| 87 | "3: movl $-1,%[err]\n" | ||
| 88 | " jmp 2b\n" | ||
| 89 | ".previous\n" | ||
| 90 | ".section __ex_table,\"a\"\n" | ||
| 91 | _ASM_ALIGN "\n" | ||
| 92 | _ASM_PTR "1b,3b\n" | ||
| 93 | ".previous" | ||
| 94 | : [err] "=r" (err) | ||
| 95 | : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) | ||
| 96 | : "memory"); /* memory required? */ | ||
| 97 | return err; | ||
| 98 | } | ||
| 99 | |||
| 100 | static inline void xrstor_state(struct xsave_struct *fx, u64 mask) | ||
| 101 | { | ||
| 102 | u32 lmask = mask; | ||
| 103 | u32 hmask = mask >> 32; | ||
| 104 | |||
| 105 | asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | ||
| 106 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
| 107 | : "memory"); | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline void xsave(struct task_struct *tsk) | ||
| 111 | { | ||
| 112 | /* As with fxsave (see i387.h), force the compiler to select an | ||
| 113 | addressing mode that doesn't require extended registers. */ | ||
| 114 | __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" | ||
| 115 | : : "D" (&(tsk->thread.xstate->xsave)), | ||
| 116 | "a" (-1), "d"(-1) : "memory"); | ||
| 117 | } | ||
| 118 | #endif | ||
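xstate_size and pcntxt_mask are discovered through CPUID leaf 0xD; a sketch of that enumeration (not part of this patch; the leaf number is written out since no symbolic name exists yet):

	static void xstate_enumerate(void)
	{
		unsigned int eax, ebx, ecx, edx;

		cpuid_count(0x0000000d, 0, &eax, &ebx, &ecx, &edx);
		/* eax/edx: feature bits XCR0 may enable (low/high 32 bits),
		   ebx: save area size for the currently enabled features,
		   ecx: size if every supported feature were enabled */
		printk(KERN_INFO "xstate: supported 0x%llx, max size %u\n",
		       eax | ((unsigned long long)edx << 32), ecx);
	}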
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 350033e8f4e1..ee9bcc6f32b6 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
| @@ -108,6 +108,9 @@ extern struct resource iomem_resource; | |||
| 108 | 108 | ||
| 109 | extern int request_resource(struct resource *root, struct resource *new); | 109 | extern int request_resource(struct resource *root, struct resource *new); |
| 110 | extern int release_resource(struct resource *new); | 110 | extern int release_resource(struct resource *new); |
| 111 | extern void reserve_region_with_split(struct resource *root, | ||
| 112 | resource_size_t start, resource_size_t end, | ||
| 113 | const char *name); | ||
| 111 | extern int insert_resource(struct resource *parent, struct resource *new); | 114 | extern int insert_resource(struct resource *parent, struct resource *new); |
| 112 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); | 115 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); |
| 113 | extern int allocate_resource(struct resource *root, struct resource *new, | 116 | extern int allocate_resource(struct resource *root, struct resource *new, |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index fac3337547eb..9f2a3751873a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
| @@ -23,12 +23,19 @@ | |||
| 23 | __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ | 23 | __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ |
| 24 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ | 24 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ |
| 25 | ____cacheline_aligned_in_smp | 25 | ____cacheline_aligned_in_smp |
| 26 | |||
| 27 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
| 28 | __attribute__((__section__(".data.percpu.page_aligned"))) \ | ||
| 29 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
| 26 | #else | 30 | #else |
| 27 | #define DEFINE_PER_CPU(type, name) \ | 31 | #define DEFINE_PER_CPU(type, name) \ |
| 28 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | 32 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name |
| 29 | 33 | ||
| 30 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | 34 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ |
| 31 | DEFINE_PER_CPU(type, name) | 35 | DEFINE_PER_CPU(type, name) |
| 36 | |||
| 37 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
| 38 | DEFINE_PER_CPU(type, name) | ||
| 32 | #endif | 39 | #endif |
| 33 | 40 | ||
| 34 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | 41 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) |
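A sketch of the new macro in use (not part of this patch; the names are hypothetical):

	#include <linux/percpu.h>

	struct scratch { char buf[PAGE_SIZE]; };
	static DEFINE_PER_CPU_PAGE_ALIGNED(struct scratch, cpu_scratch);

	static void touch_scratch(void)
	{
		struct scratch *s = &get_cpu_var(cpu_scratch);

		s->buf[0] = 1;          /* the page belongs to this cpu alone */
		put_cpu_var(cpu_scratch);
	}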
diff --git a/kernel/resource.c b/kernel/resource.c index 03d796c1b2e9..414d6fc9131e 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -516,6 +516,74 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t | |||
| 516 | return result; | 516 | return result; |
| 517 | } | 517 | } |
| 518 | 518 | ||
| 519 | static void __init __reserve_region_with_split(struct resource *root, | ||
| 520 | resource_size_t start, resource_size_t end, | ||
| 521 | const char *name) | ||
| 522 | { | ||
| 523 | struct resource *parent = root; | ||
| 524 | struct resource *conflict; | ||
| 525 | struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC); /* resource_lock held */ | ||
| 526 | |||
| 527 | if (!res) | ||
| 528 | return; | ||
| 529 | |||
| 530 | res->name = name; | ||
| 531 | res->start = start; | ||
| 532 | res->end = end; | ||
| 533 | res->flags = IORESOURCE_BUSY; | ||
| 534 | |||
| 535 | for (;;) { | ||
| 536 | conflict = __request_resource(parent, res); | ||
| 537 | if (!conflict) | ||
| 538 | break; | ||
| 539 | if (conflict != parent) { | ||
| 540 | parent = conflict; | ||
| 541 | if (!(conflict->flags & IORESOURCE_BUSY)) | ||
| 542 | continue; | ||
| 543 | } | ||
| 544 | |||
| 545 | /* Uhhuh, that didn't work out.. */ | ||
| 546 | kfree(res); | ||
| 547 | res = NULL; | ||
| 548 | break; | ||
| 549 | } | ||
| 550 | |||
| 551 | if (!res) { | ||
| 552 | printk(KERN_DEBUG " __reserve_region_with_split: (%s) [%llx, %llx], res: (%s) [%llx, %llx]\n", | ||
| 553 | conflict->name, (unsigned long long)conflict->start, (unsigned long long)conflict->end, | ||
| 554 | name, (unsigned long long)start, (unsigned long long)end); | ||
| 555 | |||
| 556 | /* failed, split and try again */ | ||
| 557 | |||
| 558 | /* the conflict covered the whole area */ | ||
| 559 | if (conflict->start <= start && conflict->end >= end) | ||
| 560 | return; | ||
| 561 | |||
| 562 | if (conflict->start > start) | ||
| 563 | __reserve_region_with_split(root, start, conflict->start-1, name); | ||
| 564 | if (!(conflict->flags & IORESOURCE_BUSY)) { | ||
| 565 | resource_size_t common_start, common_end; | ||
| 566 | |||
| 567 | common_start = max(conflict->start, start); | ||
| 568 | common_end = min(conflict->end, end); | ||
| 569 | if (common_start < common_end) | ||
| 570 | __reserve_region_with_split(root, common_start, common_end, name); | ||
| 571 | } | ||
| 572 | if (conflict->end < end) | ||
| 573 | __reserve_region_with_split(root, conflict->end+1, end, name); | ||
| 574 | } | ||
| 575 | |||
| 576 | } | ||
| 577 | |||
| 578 | void __init reserve_region_with_split(struct resource *root, | ||
| 579 | resource_size_t start, resource_size_t end, | ||
| 580 | const char *name) | ||
| 581 | { | ||
| 582 | write_lock(&resource_lock); | ||
| 583 | __reserve_region_with_split(root, start, end, name); | ||
| 584 | write_unlock(&resource_lock); | ||
| 585 | } | ||
| 586 | |||
| 519 | EXPORT_SYMBOL(adjust_resource); | 587 | EXPORT_SYMBOL(adjust_resource); |
| 520 | 588 | ||
| 521 | /** | 589 | /** |
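Unlike request_resource(), a conflict here does not fail the whole request: the range is split recursively around existing busy children and every surviving fragment is inserted. A caller sketch (not part of this patch; range and name are illustrative):

	static void __init reserve_fragmented(void)
	{
		/* claims [0xa0000, 0xfffff] except whatever is already busy */
		reserve_region_with_split(&iomem_resource, 0xa0000, 0xfffff,
					  "RAM buffer");
	}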
