author     H. Peter Anvin <hpa@zytor.com>                         2007-07-11 15:18:29 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-12 13:55:54 -0400
commit     ec481536b15eb0520d8f0204b0294480050fe1f8 (patch)
tree       3f959a26ca58477734ea1e4d5370b2d3a33a1680 /include/asm-x86_64
parent     f8c09377d754f35a135454181b869ab527cc0757 (diff)
Unify the CPU features vectors between i386 and x86-64
Unify the handling of the CPU features vectors between i386 and x86-64.
This also adopts the collapsing of features which are required at
compile-time into constant tests from x86-64 to i386.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
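The "constant tests" referred to above work roughly as sketched below: when a feature bit is guaranteed by the compile-time REQUIRED_MASK* words (see the new required-features.h further down), cpu_has() can fold to the constant 1 and the run-time test_bit() check disappears. This is a simplified sketch of the idea, not the verbatim shared i386 macro; only the first two capability words are shown.

/* Simplified sketch (not verbatim kernel code): a compile-time-constant
 * bit that is covered by REQUIRED_MASK0/1 evaluates to 1, otherwise we
 * fall back to the run-time capability-bitmap test. */
#define cpu_has(c, bit)							\
	(__builtin_constant_p(bit) &&					\
	 ((((bit) >> 5) == 0 && ((1UL << ((bit) & 31)) & REQUIRED_MASK0)) || \
	  (((bit) >> 5) == 1 && ((1UL << ((bit) & 31)) & REQUIRED_MASK1)))   \
	 ? 1 : test_bit(bit, (c)->x86_capability))

#define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)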
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/alternative.h        |  68
-rw-r--r--  include/asm-x86_64/cpufeature.h         | 115
-rw-r--r--  include/asm-x86_64/processor.h          |   2
-rw-r--r--  include/asm-x86_64/required-features.h  |  45
4 files changed, 92 insertions, 138 deletions
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a09427640764..eea7aecfac78 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -5,6 +5,41 @@
 
 #include <linux/types.h>
 #include <linux/stddef.h>
+
+/*
+ * Alternative inline assembly for SMP.
+ *
+ * The LOCK_PREFIX macro defined here replaces the LOCK and
+ * LOCK_PREFIX macros used everywhere in the source tree.
+ *
+ * SMP alternatives use the same data structures as the other
+ * alternatives and the X86_FEATURE_UP flag to indicate the case of a
+ * UP system running a SMP kernel. The existing apply_alternatives()
+ * works fine for patching a SMP kernel for UP.
+ *
+ * The SMP alternative tables can be kept after boot and contain both
+ * UP and SMP versions of the instructions to allow switching back to
+ * SMP at runtime, when hotplugging in a new CPU, which is especially
+ * useful in virtualized environments.
+ *
+ * The very common lock prefix is handled as special case in a
+ * separate table which is a pure address list without replacement ptr
+ * and size information. That keeps the table sizes small.
+ */
+
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX \
+		".section .smp_locks,\"a\"\n" \
+		" .align 8\n" \
+		" .quad 661f\n" /* address */ \
+		".previous\n" \
+		"661:\n\tlock; "
+
+#else /* ! CONFIG_SMP */
+#define LOCK_PREFIX ""
+#endif
+
+/* This must be included *after* the definition of LOCK_PREFIX */
 #include <asm/cpufeature.h>
 
 struct alt_instr {
@@ -108,39 +143,6 @@ static inline void alternatives_smp_switch(int smp) {}
  */
 #define ASM_OUTPUT2(a, b) a, b
 
-/*
- * Alternative inline assembly for SMP.
- *
- * The LOCK_PREFIX macro defined here replaces the LOCK and
- * LOCK_PREFIX macros used everywhere in the source tree.
- *
- * SMP alternatives use the same data structures as the other
- * alternatives and the X86_FEATURE_UP flag to indicate the case of a
- * UP system running a SMP kernel. The existing apply_alternatives()
- * works fine for patching a SMP kernel for UP.
- *
- * The SMP alternative tables can be kept after boot and contain both
- * UP and SMP versions of the instructions to allow switching back to
- * SMP at runtime, when hotplugging in a new CPU, which is especially
- * useful in virtualized environments.
- *
- * The very common lock prefix is handled as special case in a
- * separate table which is a pure address list without replacement ptr
- * and size information. That keeps the table sizes small.
- */
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX \
-		".section .smp_locks,\"a\"\n" \
-		" .align 8\n" \
-		" .quad 661f\n" /* address */ \
-		".previous\n" \
-		"661:\n\tlock; "
-
-#else /* ! CONFIG_SMP */
-#define LOCK_PREFIX ""
-#endif
-
 struct paravirt_patch;
 #ifdef CONFIG_PARAVIRT
 void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
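For reference, LOCK_PREFIX is consumed from inline assembly. The sketch below is modeled on the kernel's atomic helpers but is not taken from this patch (example_atomic_inc is a placeholder name): on SMP builds the address of the lock byte at label 661 ends up in .smp_locks so it can be patched to a NOP when only one CPU is present.

/* Sketch only: how an atomic helper would use LOCK_PREFIX. */
static inline void example_atomic_inc(int *v)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (*v));
}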
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 0b3c686139f1..8baefc3beb2e 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -7,115 +7,24 @@
 #ifndef __ASM_X8664_CPUFEATURE_H
 #define __ASM_X8664_CPUFEATURE_H
 
-#define NCAPINTS 7 /* N 32-bit words worth of info */
+#include <asm-i386/cpufeature.h>
 
-/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
-#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
-#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
-#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
-#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DS (0*32+21) /* Debug Store */
-#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
-                                   /* of FPU context), and CR4.OSFXSR available */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
-#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
-#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
-
-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
-/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */
-#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
-
-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
-
-/* Other features, Linux-defined mapping, word 3 */
-/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */
-#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
-#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
-#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
-#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */
-#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS (3*32+10) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS (3*32+11) /* Branch Trace Store */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
-#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
-#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
-#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_CID (4*32+10) /* Context ID */
-#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
-
-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
-#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
-#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
-#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
-
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
-
-#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
-#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
-
-#define cpu_has_fpu 1
+#undef cpu_has_vme
 #define cpu_has_vme 0
-#define cpu_has_de 1
-#define cpu_has_pse 1
-#define cpu_has_tsc 1
+
+#undef cpu_has_pae
 #define cpu_has_pae ___BUG___
-#define cpu_has_pge 1
-#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_mtrr 1
-#define cpu_has_mmx 1
-#define cpu_has_fxsr 1
-#define cpu_has_xmm 1
-#define cpu_has_xmm2 1
-#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
+
+#undef cpu_has_mp
 #define cpu_has_mp 1 /* XXX */
+
+#undef cpu_has_k6_mtrr
 #define cpu_has_k6_mtrr 0
+
+#undef cpu_has_cyrix_arr
 #define cpu_has_cyrix_arr 0
+
+#undef cpu_has_centaur_mcr
 #define cpu_has_centaur_mcr 0
-#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
-#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
-#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
 
 #endif /* __ASM_X8664_CPUFEATURE_H */
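After this change the common definitions come from asm-i386/cpufeature.h and only the handful of macros above are overridden. A hypothetical caller (the function and helpers below are placeholders, not part of this patch) would then see, on x86-64, roughly the following behaviour, assuming the constant-test machinery described in the commit message:

/* Hypothetical caller, for illustration only. On x86-64:
 * - cpu_has_fxsr comes from the shared header and, with FXSR in
 *   REQUIRED_MASK0, should fold to a compile-time 1;
 * - cpu_has_vme is forced to 0 by the #undef/#define override above,
 *   so the second branch becomes dead code. */
static void feature_users_sketch(void)
{
	if (cpu_has_fxsr)
		use_fxsave_path();	/* placeholder helper */
	if (cpu_has_vme)
		setup_vm86();		/* placeholder helper */
}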
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 461ffe4c1fcc..df6457248d62 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -368,8 +368,6 @@ static inline void sync_core(void)
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
 }
 
-#define cpu_has_fpu 1
-
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(void *x)
 {
diff --git a/include/asm-x86_64/required-features.h b/include/asm-x86_64/required-features.h
new file mode 100644
index 000000000000..262f3159d032
--- /dev/null
+++ b/include/asm-x86_64/required-features.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_REQUIRED_FEATURES_H
+#define _ASM_REQUIRED_FEATURES_H 1
+
+/* Define minimum CPUID feature set for kernel These bits are checked
+   really early to actually display a visible error message before the
+   kernel dies. Make sure to assign features to the proper mask!
+
+   The real information is in arch/x86_64/Kconfig.cpu, this just converts
+   the CONFIGs into a bitmask */
+
+/* x86-64 baseline features */
+#define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
+#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
+#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
+#define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
+#define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
+#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
+#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
+#define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
+#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
+#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
+
+#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
+			NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
+			NEED_XMM|NEED_XMM2)
+#define SSE_MASK (NEED_XMM|NEED_XMM2)
+
+/* x86-64 baseline features */
+#define NEED_LM (1<<(X86_FEATURE_LM & 31))
+
+#ifdef CONFIG_X86_USE_3DNOW
+# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
+#else
+# define NEED_3DNOW 0
+#endif
+
+#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
+
+#define REQUIRED_MASK2 0
+#define REQUIRED_MASK3 0
+#define REQUIRED_MASK4 0
+#define REQUIRED_MASK5 0
+#define REQUIRED_MASK6 0
+
+#endif
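required-features.h only defines masks; the point is that very early boot code can compare them against the CPUID-derived capability words and print a visible error before the kernel falls over. A rough sketch of that kind of check (not the actual code under arch/x86_64; the function name is a placeholder) could look like this:

/* Rough sketch of an early required-feature check. caps[] holds the
 * x86_capability words filled in from CPUID, required[] the
 * compile-time masks from this header (7 words, matching NCAPINTS). */
static void verify_required_features_sketch(const u32 *caps)
{
	static const u32 required[] = {
		REQUIRED_MASK0, REQUIRED_MASK1, REQUIRED_MASK2,
		REQUIRED_MASK3, REQUIRED_MASK4, REQUIRED_MASK5,
		REQUIRED_MASK6,
	};
	int w;

	for (w = 0; w < 7; w++)
		if ((caps[w] & required[w]) != required[w])
			panic("missing required CPU features 0x%08x in word %d",
			      required[w] & ~caps[w], w);
}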