Diffstat (limited to 'arch/x86/kernel/cpu')
27 files changed, 1997 insertions, 2003 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee76eaad3001..7f0b45a5d788 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,22 +3,30 @@
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := intel_cacheinfo.o addon_cpuid_features.o | 5 | obj-y := intel_cacheinfo.o addon_cpuid_features.o |
6 | obj-y += proc.o feature_names.o | 6 | obj-y += proc.o capflags.o powerflags.o common.o |
7 | 7 | ||
8 | obj-$(CONFIG_X86_32) += common.o bugs.o | 8 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o |
9 | obj-$(CONFIG_X86_64) += common_64.o bugs_64.o | 9 | obj-$(CONFIG_X86_64) += bugs_64.o |
10 | obj-$(CONFIG_X86_32) += amd.o | 10 | |
11 | obj-$(CONFIG_X86_64) += amd_64.o | 11 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o |
12 | obj-$(CONFIG_X86_32) += cyrix.o | 12 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o |
13 | obj-$(CONFIG_X86_32) += centaur.o | 13 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o |
14 | obj-$(CONFIG_X86_64) += centaur_64.o | 14 | obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o |
15 | obj-$(CONFIG_X86_32) += transmeta.o | 15 | obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o |
16 | obj-$(CONFIG_X86_32) += intel.o | 16 | obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o |
17 | obj-$(CONFIG_X86_64) += intel_64.o | 17 | obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o |
18 | obj-$(CONFIG_X86_32) += umc.o | ||
19 | 18 | ||
20 | obj-$(CONFIG_X86_MCE) += mcheck/ | 19 | obj-$(CONFIG_X86_MCE) += mcheck/ |
21 | obj-$(CONFIG_MTRR) += mtrr/ | 20 | obj-$(CONFIG_MTRR) += mtrr/ |
22 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | 21 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ |
23 | 22 | ||
24 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 23 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o |
24 | |||
25 | quiet_cmd_mkcapflags = MKCAP $@ | ||
26 | cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ | ||
27 | |||
28 | cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h | ||
29 | |||
30 | targets += capflags.c | ||
31 | $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE | ||
32 | $(call if_changed,mkcapflags) | ||
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index a6ef672adbba..0d9c993aa93e 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,6 +7,8 @@
7 | #include <asm/pat.h> | 7 | #include <asm/pat.h> |
8 | #include <asm/processor.h> | 8 | #include <asm/processor.h> |
9 | 9 | ||
10 | #include <mach_apic.h> | ||
11 | |||
10 | struct cpuid_bit { | 12 | struct cpuid_bit { |
11 | u16 feature; | 13 | u16 feature; |
12 | u8 reg; | 14 | u8 reg; |
@@ -48,6 +50,92 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
48 | } | 50 | } |
49 | } | 51 | } |
50 | 52 | ||
53 | /* leaf 0xb SMT level */ | ||
54 | #define SMT_LEVEL 0 | ||
55 | |||
56 | /* leaf 0xb sub-leaf types */ | ||
57 | #define INVALID_TYPE 0 | ||
58 | #define SMT_TYPE 1 | ||
59 | #define CORE_TYPE 2 | ||
60 | |||
61 | #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) | ||
62 | #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) | ||
63 | #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) | ||
64 | |||
65 | /* | ||
66 | * Check for extended topology enumeration cpuid leaf 0xb and if it | ||
67 | * exists, use it for populating initial_apicid and cpu topology | ||
68 | * detection. | ||
69 | */ | ||
70 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | ||
71 | { | ||
72 | #ifdef CONFIG_SMP | ||
73 | unsigned int eax, ebx, ecx, edx, sub_index; | ||
74 | unsigned int ht_mask_width, core_plus_mask_width; | ||
75 | unsigned int core_select_mask, core_level_siblings; | ||
76 | |||
77 | if (c->cpuid_level < 0xb) | ||
78 | return; | ||
79 | |||
80 | cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); | ||
81 | |||
82 | /* | ||
83 | * check if the cpuid leaf 0xb is actually implemented. | ||
84 | */ | ||
85 | if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) | ||
86 | return; | ||
87 | |||
88 | set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); | ||
89 | |||
90 | /* | ||
91 | * initial apic id, which also represents 32-bit extended x2apic id. | ||
92 | */ | ||
93 | c->initial_apicid = edx; | ||
94 | |||
95 | /* | ||
96 | * Populate HT related information from sub-leaf level 0. | ||
97 | */ | ||
98 | core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); | ||
99 | core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); | ||
100 | |||
101 | sub_index = 1; | ||
102 | do { | ||
103 | cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); | ||
104 | |||
105 | /* | ||
106 | * Check for the Core type in the implemented sub leaves. | ||
107 | */ | ||
108 | if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { | ||
109 | core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); | ||
110 | core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); | ||
111 | break; | ||
112 | } | ||
113 | |||
114 | sub_index++; | ||
115 | } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); | ||
116 | |||
117 | core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; | ||
118 | |||
119 | #ifdef CONFIG_X86_32 | ||
120 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width) | ||
121 | & core_select_mask; | ||
122 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); | ||
123 | #else | ||
124 | c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask; | ||
125 | c->phys_proc_id = phys_pkg_id(core_plus_mask_width); | ||
126 | #endif | ||
127 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); | ||
128 | |||
129 | |||
130 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
131 | c->phys_proc_id); | ||
132 | if (c->x86_max_cores > 1) | ||
133 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
134 | c->cpu_core_id); | ||
135 | return; | ||
136 | #endif | ||
137 | } | ||
138 | |||
51 | #ifdef CONFIG_X86_PAT | 139 | #ifdef CONFIG_X86_PAT |
52 | void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) | 140 | void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) |
53 | { | 141 | { |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 18514ed26104..32e73520adf7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,13 +1,22 @@
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/bitops.h> | 2 | #include <linux/bitops.h> |
3 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
4 | |||
4 | #include <asm/io.h> | 5 | #include <asm/io.h> |
5 | #include <asm/processor.h> | 6 | #include <asm/processor.h> |
6 | #include <asm/apic.h> | 7 | #include <asm/apic.h> |
7 | 8 | ||
9 | #ifdef CONFIG_X86_64 | ||
10 | # include <asm/numa_64.h> | ||
11 | # include <asm/mmconfig.h> | ||
12 | # include <asm/cacheflush.h> | ||
13 | #endif | ||
14 | |||
8 | #include <mach_apic.h> | 15 | #include <mach_apic.h> |
16 | |||
9 | #include "cpu.h" | 17 | #include "cpu.h" |
10 | 18 | ||
19 | #ifdef CONFIG_X86_32 | ||
11 | /* | 20 | /* |
12 | * B step AMD K6 before B 9730xxxx have hardware bugs that can cause | 21 | * B step AMD K6 before B 9730xxxx have hardware bugs that can cause |
13 | * misexecution of code under Linux. Owners of such processors should | 22 | * misexecution of code under Linux. Owners of such processors should |
@@ -24,26 +33,273 @@
24 | extern void vide(void); | 33 | extern void vide(void); |
25 | __asm__(".align 4\nvide: ret"); | 34 | __asm__(".align 4\nvide: ret"); |
26 | 35 | ||
27 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | 36 | static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) |
28 | { | 37 | { |
29 | if (cpuid_eax(0x80000000) >= 0x80000007) { | 38 | /* |
30 | c->x86_power = cpuid_edx(0x80000007); | 39 | * General Systems BIOSen alias the cpu frequency registers |
31 | if (c->x86_power & (1<<8)) | 40 | * of the Elan at 0x000df000. Unfortuantly, one of the Linux |
32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 41 | * drivers subsequently pokes it, and changes the CPU speed. |
42 | * Workaround : Remove the unneeded alias. | ||
43 | */ | ||
44 | #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ | ||
45 | #define CBAR_ENB (0x80000000) | ||
46 | #define CBAR_KEY (0X000000CB) | ||
47 | if (c->x86_model == 9 || c->x86_model == 10) { | ||
48 | if (inl (CBAR) & CBAR_ENB) | ||
49 | outl (0 | CBAR_KEY, CBAR); | ||
33 | } | 50 | } |
34 | |||
35 | /* Set MTRR capability flag if appropriate */ | ||
36 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
37 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
38 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
39 | } | 51 | } |
40 | 52 | ||
41 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 53 | |
54 | static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | ||
42 | { | 55 | { |
43 | u32 l, h; | 56 | u32 l, h; |
44 | int mbytes = num_physpages >> (20-PAGE_SHIFT); | 57 | int mbytes = num_physpages >> (20-PAGE_SHIFT); |
45 | int r; | ||
46 | 58 | ||
59 | if (c->x86_model < 6) { | ||
60 | /* Based on AMD doc 20734R - June 2000 */ | ||
61 | if (c->x86_model == 0) { | ||
62 | clear_cpu_cap(c, X86_FEATURE_APIC); | ||
63 | set_cpu_cap(c, X86_FEATURE_PGE); | ||
64 | } | ||
65 | return; | ||
66 | } | ||
67 | |||
68 | if (c->x86_model == 6 && c->x86_mask == 1) { | ||
69 | const int K6_BUG_LOOP = 1000000; | ||
70 | int n; | ||
71 | void (*f_vide)(void); | ||
72 | unsigned long d, d2; | ||
73 | |||
74 | printk(KERN_INFO "AMD K6 stepping B detected - "); | ||
75 | |||
76 | /* | ||
77 | * It looks like AMD fixed the 2.6.2 bug and improved indirect | ||
78 | * calls at the same time. | ||
79 | */ | ||
80 | |||
81 | n = K6_BUG_LOOP; | ||
82 | f_vide = vide; | ||
83 | rdtscl(d); | ||
84 | while (n--) | ||
85 | f_vide(); | ||
86 | rdtscl(d2); | ||
87 | d = d2-d; | ||
88 | |||
89 | if (d > 20*K6_BUG_LOOP) | ||
90 | printk("system stability may be impaired when more than 32 MB are used.\n"); | ||
91 | else | ||
92 | printk("probably OK (after B9730xxxx).\n"); | ||
93 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | ||
94 | } | ||
95 | |||
96 | /* K6 with old style WHCR */ | ||
97 | if (c->x86_model < 8 || | ||
98 | (c->x86_model == 8 && c->x86_mask < 8)) { | ||
99 | /* We can only write allocate on the low 508Mb */ | ||
100 | if (mbytes > 508) | ||
101 | mbytes = 508; | ||
102 | |||
103 | rdmsr(MSR_K6_WHCR, l, h); | ||
104 | if ((l&0x0000FFFF) == 0) { | ||
105 | unsigned long flags; | ||
106 | l = (1<<0)|((mbytes/4)<<1); | ||
107 | local_irq_save(flags); | ||
108 | wbinvd(); | ||
109 | wrmsr(MSR_K6_WHCR, l, h); | ||
110 | local_irq_restore(flags); | ||
111 | printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", | ||
112 | mbytes); | ||
113 | } | ||
114 | return; | ||
115 | } | ||
116 | |||
117 | if ((c->x86_model == 8 && c->x86_mask > 7) || | ||
118 | c->x86_model == 9 || c->x86_model == 13) { | ||
119 | /* The more serious chips .. */ | ||
120 | |||
121 | if (mbytes > 4092) | ||
122 | mbytes = 4092; | ||
123 | |||
124 | rdmsr(MSR_K6_WHCR, l, h); | ||
125 | if ((l&0xFFFF0000) == 0) { | ||
126 | unsigned long flags; | ||
127 | l = ((mbytes>>2)<<22)|(1<<16); | ||
128 | local_irq_save(flags); | ||
129 | wbinvd(); | ||
130 | wrmsr(MSR_K6_WHCR, l, h); | ||
131 | local_irq_restore(flags); | ||
132 | printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", | ||
133 | mbytes); | ||
134 | } | ||
135 | |||
136 | return; | ||
137 | } | ||
138 | |||
139 | if (c->x86_model == 10) { | ||
140 | /* AMD Geode LX is model 10 */ | ||
141 | /* placeholder for any needed mods */ | ||
142 | return; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | ||
147 | { | ||
148 | u32 l, h; | ||
149 | |||
150 | /* | ||
151 | * Bit 15 of Athlon specific MSR 15, needs to be 0 | ||
152 | * to enable SSE on Palomino/Morgan/Barton CPU's. | ||
153 | * If the BIOS didn't enable it already, enable it here. | ||
154 | */ | ||
155 | if (c->x86_model >= 6 && c->x86_model <= 10) { | ||
156 | if (!cpu_has(c, X86_FEATURE_XMM)) { | ||
157 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | ||
158 | rdmsr(MSR_K7_HWCR, l, h); | ||
159 | l &= ~0x00008000; | ||
160 | wrmsr(MSR_K7_HWCR, l, h); | ||
161 | set_cpu_cap(c, X86_FEATURE_XMM); | ||
162 | } | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * It's been determined by AMD that Athlons since model 8 stepping 1 | ||
167 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx | ||
168 | * As per AMD technical note 27212 0.2 | ||
169 | */ | ||
170 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | ||
171 | rdmsr(MSR_K7_CLK_CTL, l, h); | ||
172 | if ((l & 0xfff00000) != 0x20000000) { | ||
173 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | ||
174 | ((l & 0x000fffff)|0x20000000)); | ||
175 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | ||
176 | } | ||
177 | } | ||
178 | |||
179 | set_cpu_cap(c, X86_FEATURE_K7); | ||
180 | } | ||
181 | #endif | ||
182 | |||
183 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | ||
184 | static int __cpuinit nearby_node(int apicid) | ||
185 | { | ||
186 | int i, node; | ||
187 | |||
188 | for (i = apicid - 1; i >= 0; i--) { | ||
189 | node = apicid_to_node[i]; | ||
190 | if (node != NUMA_NO_NODE && node_online(node)) | ||
191 | return node; | ||
192 | } | ||
193 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { | ||
194 | node = apicid_to_node[i]; | ||
195 | if (node != NUMA_NO_NODE && node_online(node)) | ||
196 | return node; | ||
197 | } | ||
198 | return first_node(node_online_map); /* Shouldn't happen */ | ||
199 | } | ||
200 | #endif | ||
201 | |||
202 | /* | ||
203 | * On a AMD dual core setup the lower bits of the APIC id distingush the cores. | ||
204 | * Assumes number of cores is a power of two. | ||
205 | */ | ||
206 | static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | ||
207 | { | ||
208 | #ifdef CONFIG_X86_HT | ||
209 | unsigned bits; | ||
210 | |||
211 | bits = c->x86_coreid_bits; | ||
212 | |||
213 | /* Low order bits define the core id (index of core in socket) */ | ||
214 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); | ||
215 | /* Convert the initial APIC ID into the socket ID */ | ||
216 | c->phys_proc_id = c->initial_apicid >> bits; | ||
217 | #endif | ||
218 | } | ||
219 | |||
220 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | ||
221 | { | ||
222 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | ||
223 | int cpu = smp_processor_id(); | ||
224 | int node; | ||
225 | unsigned apicid = hard_smp_processor_id(); | ||
226 | |||
227 | node = c->phys_proc_id; | ||
228 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | ||
229 | node = apicid_to_node[apicid]; | ||
230 | if (!node_online(node)) { | ||
231 | /* Two possibilities here: | ||
232 | - The CPU is missing memory and no node was created. | ||
233 | In that case try picking one from a nearby CPU | ||
234 | - The APIC IDs differ from the HyperTransport node IDs | ||
235 | which the K8 northbridge parsing fills in. | ||
236 | Assume they are all increased by a constant offset, | ||
237 | but in the same order as the HT nodeids. | ||
238 | If that doesn't result in a usable node fall back to the | ||
239 | path for the previous case. */ | ||
240 | |||
241 | int ht_nodeid = c->initial_apicid; | ||
242 | |||
243 | if (ht_nodeid >= 0 && | ||
244 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | ||
245 | node = apicid_to_node[ht_nodeid]; | ||
246 | /* Pick a nearby node */ | ||
247 | if (!node_online(node)) | ||
248 | node = nearby_node(apicid); | ||
249 | } | ||
250 | numa_set_node(cpu, node); | ||
251 | |||
252 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
253 | #endif | ||
254 | } | ||
255 | |||
256 | static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | ||
257 | { | ||
258 | #ifdef CONFIG_X86_HT | ||
259 | unsigned bits, ecx; | ||
260 | |||
261 | /* Multi core CPU? */ | ||
262 | if (c->extended_cpuid_level < 0x80000008) | ||
263 | return; | ||
264 | |||
265 | ecx = cpuid_ecx(0x80000008); | ||
266 | |||
267 | c->x86_max_cores = (ecx & 0xff) + 1; | ||
268 | |||
269 | /* CPU telling us the core id bits shift? */ | ||
270 | bits = (ecx >> 12) & 0xF; | ||
271 | |||
272 | /* Otherwise recompute */ | ||
273 | if (bits == 0) { | ||
274 | while ((1 << bits) < c->x86_max_cores) | ||
275 | bits++; | ||
276 | } | ||
277 | |||
278 | c->x86_coreid_bits = bits; | ||
279 | #endif | ||
280 | } | ||
281 | |||
282 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | ||
283 | { | ||
284 | early_init_amd_mc(c); | ||
285 | |||
286 | /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ | ||
287 | if (c->x86_power & (1<<8)) | ||
288 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
289 | |||
290 | #ifdef CONFIG_X86_64 | ||
291 | set_cpu_cap(c, X86_FEATURE_SYSCALL32); | ||
292 | #else | ||
293 | /* Set MTRR capability flag if appropriate */ | ||
294 | if (c->x86 == 5) | ||
295 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
296 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
297 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
298 | #endif | ||
299 | } | ||
300 | |||
301 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | ||
302 | { | ||
47 | #ifdef CONFIG_SMP | 303 | #ifdef CONFIG_SMP |
48 | unsigned long long value; | 304 | unsigned long long value; |
49 | 305 | ||
@@ -54,7 +310,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
54 | * Errata 63 for SH-B3 steppings | 310 | * Errata 63 for SH-B3 steppings |
55 | * Errata 122 for all steppings (F+ have it disabled by default) | 311 | * Errata 122 for all steppings (F+ have it disabled by default) |
56 | */ | 312 | */ |
57 | if (c->x86 == 15) { | 313 | if (c->x86 == 0xf) { |
58 | rdmsrl(MSR_K7_HWCR, value); | 314 | rdmsrl(MSR_K7_HWCR, value); |
59 | value |= 1 << 6; | 315 | value |= 1 << 6; |
60 | wrmsrl(MSR_K7_HWCR, value); | 316 | wrmsrl(MSR_K7_HWCR, value); |
@@ -64,209 +320,119 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
64 | early_init_amd(c); | 320 | early_init_amd(c); |
65 | 321 | ||
66 | /* | 322 | /* |
67 | * FIXME: We should handle the K5 here. Set up the write | ||
68 | * range and also turn on MSR 83 bits 4 and 31 (write alloc, | ||
69 | * no bus pipeline) | ||
70 | */ | ||
71 | |||
72 | /* | ||
73 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 323 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
74 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway | 324 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway |
75 | */ | 325 | */ |
76 | clear_cpu_cap(c, 0*32+31); | 326 | clear_cpu_cap(c, 0*32+31); |
77 | 327 | ||
78 | r = get_model_name(c); | 328 | #ifdef CONFIG_X86_64 |
329 | /* On C+ stepping K8 rep microcode works well for copy/memset */ | ||
330 | if (c->x86 == 0xf) { | ||
331 | u32 level; | ||
79 | 332 | ||
80 | switch (c->x86) { | 333 | level = cpuid_eax(1); |
81 | case 4: | 334 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) |
82 | /* | 335 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
83 | * General Systems BIOSen alias the cpu frequency registers | ||
84 | * of the Elan at 0x000df000. Unfortuantly, one of the Linux | ||
85 | * drivers subsequently pokes it, and changes the CPU speed. | ||
86 | * Workaround : Remove the unneeded alias. | ||
87 | */ | ||
88 | #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ | ||
89 | #define CBAR_ENB (0x80000000) | ||
90 | #define CBAR_KEY (0X000000CB) | ||
91 | if (c->x86_model == 9 || c->x86_model == 10) { | ||
92 | if (inl (CBAR) & CBAR_ENB) | ||
93 | outl (0 | CBAR_KEY, CBAR); | ||
94 | } | ||
95 | break; | ||
96 | case 5: | ||
97 | if (c->x86_model < 6) { | ||
98 | /* Based on AMD doc 20734R - June 2000 */ | ||
99 | if (c->x86_model == 0) { | ||
100 | clear_cpu_cap(c, X86_FEATURE_APIC); | ||
101 | set_cpu_cap(c, X86_FEATURE_PGE); | ||
102 | } | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | if (c->x86_model == 6 && c->x86_mask == 1) { | ||
107 | const int K6_BUG_LOOP = 1000000; | ||
108 | int n; | ||
109 | void (*f_vide)(void); | ||
110 | unsigned long d, d2; | ||
111 | |||
112 | printk(KERN_INFO "AMD K6 stepping B detected - "); | ||
113 | |||
114 | /* | ||
115 | * It looks like AMD fixed the 2.6.2 bug and improved indirect | ||
116 | * calls at the same time. | ||
117 | */ | ||
118 | |||
119 | n = K6_BUG_LOOP; | ||
120 | f_vide = vide; | ||
121 | rdtscl(d); | ||
122 | while (n--) | ||
123 | f_vide(); | ||
124 | rdtscl(d2); | ||
125 | d = d2-d; | ||
126 | |||
127 | if (d > 20*K6_BUG_LOOP) | ||
128 | printk("system stability may be impaired when more than 32 MB are used.\n"); | ||
129 | else | ||
130 | printk("probably OK (after B9730xxxx).\n"); | ||
131 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | ||
132 | } | ||
133 | |||
134 | /* K6 with old style WHCR */ | ||
135 | if (c->x86_model < 8 || | ||
136 | (c->x86_model == 8 && c->x86_mask < 8)) { | ||
137 | /* We can only write allocate on the low 508Mb */ | ||
138 | if (mbytes > 508) | ||
139 | mbytes = 508; | ||
140 | |||
141 | rdmsr(MSR_K6_WHCR, l, h); | ||
142 | if ((l&0x0000FFFF) == 0) { | ||
143 | unsigned long flags; | ||
144 | l = (1<<0)|((mbytes/4)<<1); | ||
145 | local_irq_save(flags); | ||
146 | wbinvd(); | ||
147 | wrmsr(MSR_K6_WHCR, l, h); | ||
148 | local_irq_restore(flags); | ||
149 | printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", | ||
150 | mbytes); | ||
151 | } | ||
152 | break; | ||
153 | } | ||
154 | |||
155 | if ((c->x86_model == 8 && c->x86_mask > 7) || | ||
156 | c->x86_model == 9 || c->x86_model == 13) { | ||
157 | /* The more serious chips .. */ | ||
158 | |||
159 | if (mbytes > 4092) | ||
160 | mbytes = 4092; | ||
161 | |||
162 | rdmsr(MSR_K6_WHCR, l, h); | ||
163 | if ((l&0xFFFF0000) == 0) { | ||
164 | unsigned long flags; | ||
165 | l = ((mbytes>>2)<<22)|(1<<16); | ||
166 | local_irq_save(flags); | ||
167 | wbinvd(); | ||
168 | wrmsr(MSR_K6_WHCR, l, h); | ||
169 | local_irq_restore(flags); | ||
170 | printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", | ||
171 | mbytes); | ||
172 | } | ||
173 | |||
174 | break; | ||
175 | } | ||
176 | |||
177 | if (c->x86_model == 10) { | ||
178 | /* AMD Geode LX is model 10 */ | ||
179 | /* placeholder for any needed mods */ | ||
180 | break; | ||
181 | } | ||
182 | break; | ||
183 | case 6: /* An Athlon/Duron */ | ||
184 | |||
185 | /* | ||
186 | * Bit 15 of Athlon specific MSR 15, needs to be 0 | ||
187 | * to enable SSE on Palomino/Morgan/Barton CPU's. | ||
188 | * If the BIOS didn't enable it already, enable it here. | ||
189 | */ | ||
190 | if (c->x86_model >= 6 && c->x86_model <= 10) { | ||
191 | if (!cpu_has(c, X86_FEATURE_XMM)) { | ||
192 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | ||
193 | rdmsr(MSR_K7_HWCR, l, h); | ||
194 | l &= ~0x00008000; | ||
195 | wrmsr(MSR_K7_HWCR, l, h); | ||
196 | set_cpu_cap(c, X86_FEATURE_XMM); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * It's been determined by AMD that Athlons since model 8 stepping 1 | ||
202 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx | ||
203 | * As per AMD technical note 27212 0.2 | ||
204 | */ | ||
205 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | ||
206 | rdmsr(MSR_K7_CLK_CTL, l, h); | ||
207 | if ((l & 0xfff00000) != 0x20000000) { | ||
208 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | ||
209 | ((l & 0x000fffff)|0x20000000)); | ||
210 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | ||
211 | } | ||
212 | } | ||
213 | break; | ||
214 | } | 336 | } |
337 | if (c->x86 == 0x10 || c->x86 == 0x11) | ||
338 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
339 | #else | ||
340 | |||
341 | /* | ||
342 | * FIXME: We should handle the K5 here. Set up the write | ||
343 | * range and also turn on MSR 83 bits 4 and 31 (write alloc, | ||
344 | * no bus pipeline) | ||
345 | */ | ||
215 | 346 | ||
216 | switch (c->x86) { | 347 | switch (c->x86) { |
217 | case 15: | 348 | case 4: |
218 | /* Use K8 tuning for Fam10h and Fam11h */ | 349 | init_amd_k5(c); |
219 | case 0x10: | ||
220 | case 0x11: | ||
221 | set_cpu_cap(c, X86_FEATURE_K8); | ||
222 | break; | 350 | break; |
223 | case 6: | 351 | case 5: |
224 | set_cpu_cap(c, X86_FEATURE_K7); | 352 | init_amd_k6(c); |
353 | break; | ||
354 | case 6: /* An Athlon/Duron */ | ||
355 | init_amd_k7(c); | ||
225 | break; | 356 | break; |
226 | } | 357 | } |
358 | |||
359 | /* K6s reports MCEs but don't actually have all the MSRs */ | ||
360 | if (c->x86 < 6) | ||
361 | clear_cpu_cap(c, X86_FEATURE_MCE); | ||
362 | #endif | ||
363 | |||
364 | /* Enable workaround for FXSAVE leak */ | ||
227 | if (c->x86 >= 6) | 365 | if (c->x86 >= 6) |
228 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); | 366 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); |
229 | 367 | ||
230 | display_cacheinfo(c); | 368 | if (!c->x86_model_id[0]) { |
231 | 369 | switch (c->x86) { | |
232 | if (cpuid_eax(0x80000000) >= 0x80000008) | 370 | case 0xf: |
233 | c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; | 371 | /* Should distinguish Models here, but this is only |
372 | a fallback anyways. */ | ||
373 | strcpy(c->x86_model_id, "Hammer"); | ||
374 | break; | ||
375 | } | ||
376 | } | ||
234 | 377 | ||
235 | #ifdef CONFIG_X86_HT | 378 | display_cacheinfo(c); |
236 | /* | ||
237 | * On a AMD multi core setup the lower bits of the APIC id | ||
238 | * distinguish the cores. | ||
239 | */ | ||
240 | if (c->x86_max_cores > 1) { | ||
241 | int cpu = smp_processor_id(); | ||
242 | unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf; | ||
243 | 379 | ||
244 | if (bits == 0) { | 380 | /* Multi core CPU? */ |
245 | while ((1 << bits) < c->x86_max_cores) | 381 | if (c->extended_cpuid_level >= 0x80000008) { |
246 | bits++; | 382 | amd_detect_cmp(c); |
247 | } | 383 | srat_detect_node(c); |
248 | c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1); | ||
249 | c->phys_proc_id >>= bits; | ||
250 | printk(KERN_INFO "CPU %d(%d) -> Core %d\n", | ||
251 | cpu, c->x86_max_cores, c->cpu_core_id); | ||
252 | } | 384 | } |
385 | |||
386 | #ifdef CONFIG_X86_32 | ||
387 | detect_ht(c); | ||
253 | #endif | 388 | #endif |
254 | 389 | ||
255 | if (cpuid_eax(0x80000000) >= 0x80000006) { | 390 | if (c->extended_cpuid_level >= 0x80000006) { |
256 | if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000)) | 391 | if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000)) |
257 | num_cache_leaves = 4; | 392 | num_cache_leaves = 4; |
258 | else | 393 | else |
259 | num_cache_leaves = 3; | 394 | num_cache_leaves = 3; |
260 | } | 395 | } |
261 | 396 | ||
262 | /* K6s reports MCEs but don't actually have all the MSRs */ | 397 | if (c->x86 >= 0xf && c->x86 <= 0x11) |
263 | if (c->x86 < 6) | 398 | set_cpu_cap(c, X86_FEATURE_K8); |
264 | clear_cpu_cap(c, X86_FEATURE_MCE); | ||
265 | 399 | ||
266 | if (cpu_has_xmm2) | 400 | if (cpu_has_xmm2) { |
401 | /* MFENCE stops RDTSC speculation */ | ||
267 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | 402 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); |
403 | } | ||
404 | |||
405 | #ifdef CONFIG_X86_64 | ||
406 | if (c->x86 == 0x10) { | ||
407 | /* do this for boot cpu */ | ||
408 | if (c == &boot_cpu_data) | ||
409 | check_enable_amd_mmconf_dmi(); | ||
410 | |||
411 | fam10h_check_enable_mmcfg(); | ||
412 | } | ||
413 | |||
414 | if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) { | ||
415 | unsigned long long tseg; | ||
416 | |||
417 | /* | ||
418 | * Split up direct mapping around the TSEG SMM area. | ||
419 | * Don't do it for gbpages because there seems very little | ||
420 | * benefit in doing so. | ||
421 | */ | ||
422 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | ||
423 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | ||
424 | if ((tseg>>PMD_SHIFT) < | ||
425 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || | ||
426 | ((tseg>>PMD_SHIFT) < | ||
427 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && | ||
428 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) | ||
429 | set_memory_4k((unsigned long)__va(tseg), 1); | ||
430 | } | ||
431 | } | ||
432 | #endif | ||
268 | } | 433 | } |
269 | 434 | ||
435 | #ifdef CONFIG_X86_32 | ||
270 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 436 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
271 | { | 437 | { |
272 | /* AMD errata T13 (order #21922) */ | 438 | /* AMD errata T13 (order #21922) */ |
@@ -279,10 +445,12 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
279 | } | 445 | } |
280 | return size; | 446 | return size; |
281 | } | 447 | } |
448 | #endif | ||
282 | 449 | ||
283 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { | 450 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { |
284 | .c_vendor = "AMD", | 451 | .c_vendor = "AMD", |
285 | .c_ident = { "AuthenticAMD" }, | 452 | .c_ident = { "AuthenticAMD" }, |
453 | #ifdef CONFIG_X86_32 | ||
286 | .c_models = { | 454 | .c_models = { |
287 | { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = | 455 | { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = |
288 | { | 456 | { |
@@ -295,9 +463,11 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
295 | } | 463 | } |
296 | }, | 464 | }, |
297 | }, | 465 | }, |
466 | .c_size_cache = amd_size_cache, | ||
467 | #endif | ||
298 | .c_early_init = early_init_amd, | 468 | .c_early_init = early_init_amd, |
299 | .c_init = init_amd, | 469 | .c_init = init_amd, |
300 | .c_size_cache = amd_size_cache, | 470 | .c_x86_vendor = X86_VENDOR_AMD, |
301 | }; | 471 | }; |
302 | 472 | ||
303 | cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); | 473 | cpu_dev_register(amd_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
deleted file mode 100644
index d1692b2a41ff..000000000000
--- a/arch/x86/kernel/cpu/amd_64.c
+++ /dev/null
@@ -1,224 +0,0 @@
1 | #include <linux/init.h> | ||
2 | #include <linux/mm.h> | ||
3 | |||
4 | #include <asm/numa_64.h> | ||
5 | #include <asm/mmconfig.h> | ||
6 | #include <asm/cacheflush.h> | ||
7 | |||
8 | #include <mach_apic.h> | ||
9 | |||
10 | #include "cpu.h" | ||
11 | |||
12 | int force_mwait __cpuinitdata; | ||
13 | |||
14 | #ifdef CONFIG_NUMA | ||
15 | static int __cpuinit nearby_node(int apicid) | ||
16 | { | ||
17 | int i, node; | ||
18 | |||
19 | for (i = apicid - 1; i >= 0; i--) { | ||
20 | node = apicid_to_node[i]; | ||
21 | if (node != NUMA_NO_NODE && node_online(node)) | ||
22 | return node; | ||
23 | } | ||
24 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { | ||
25 | node = apicid_to_node[i]; | ||
26 | if (node != NUMA_NO_NODE && node_online(node)) | ||
27 | return node; | ||
28 | } | ||
29 | return first_node(node_online_map); /* Shouldn't happen */ | ||
30 | } | ||
31 | #endif | ||
32 | |||
33 | /* | ||
34 | * On a AMD dual core setup the lower bits of the APIC id distingush the cores. | ||
35 | * Assumes number of cores is a power of two. | ||
36 | */ | ||
37 | static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | ||
38 | { | ||
39 | #ifdef CONFIG_SMP | ||
40 | unsigned bits; | ||
41 | #ifdef CONFIG_NUMA | ||
42 | int cpu = smp_processor_id(); | ||
43 | int node = 0; | ||
44 | unsigned apicid = hard_smp_processor_id(); | ||
45 | #endif | ||
46 | bits = c->x86_coreid_bits; | ||
47 | |||
48 | /* Low order bits define the core id (index of core in socket) */ | ||
49 | c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); | ||
50 | /* Convert the initial APIC ID into the socket ID */ | ||
51 | c->phys_proc_id = c->initial_apicid >> bits; | ||
52 | |||
53 | #ifdef CONFIG_NUMA | ||
54 | node = c->phys_proc_id; | ||
55 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | ||
56 | node = apicid_to_node[apicid]; | ||
57 | if (!node_online(node)) { | ||
58 | /* Two possibilities here: | ||
59 | - The CPU is missing memory and no node was created. | ||
60 | In that case try picking one from a nearby CPU | ||
61 | - The APIC IDs differ from the HyperTransport node IDs | ||
62 | which the K8 northbridge parsing fills in. | ||
63 | Assume they are all increased by a constant offset, | ||
64 | but in the same order as the HT nodeids. | ||
65 | If that doesn't result in a usable node fall back to the | ||
66 | path for the previous case. */ | ||
67 | |||
68 | int ht_nodeid = c->initial_apicid; | ||
69 | |||
70 | if (ht_nodeid >= 0 && | ||
71 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | ||
72 | node = apicid_to_node[ht_nodeid]; | ||
73 | /* Pick a nearby node */ | ||
74 | if (!node_online(node)) | ||
75 | node = nearby_node(apicid); | ||
76 | } | ||
77 | numa_set_node(cpu, node); | ||
78 | |||
79 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
80 | #endif | ||
81 | #endif | ||
82 | } | ||
83 | |||
84 | static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | ||
85 | { | ||
86 | #ifdef CONFIG_SMP | ||
87 | unsigned bits, ecx; | ||
88 | |||
89 | /* Multi core CPU? */ | ||
90 | if (c->extended_cpuid_level < 0x80000008) | ||
91 | return; | ||
92 | |||
93 | ecx = cpuid_ecx(0x80000008); | ||
94 | |||
95 | c->x86_max_cores = (ecx & 0xff) + 1; | ||
96 | |||
97 | /* CPU telling us the core id bits shift? */ | ||
98 | bits = (ecx >> 12) & 0xF; | ||
99 | |||
100 | /* Otherwise recompute */ | ||
101 | if (bits == 0) { | ||
102 | while ((1 << bits) < c->x86_max_cores) | ||
103 | bits++; | ||
104 | } | ||
105 | |||
106 | c->x86_coreid_bits = bits; | ||
107 | |||
108 | #endif | ||
109 | } | ||
110 | |||
111 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | ||
112 | { | ||
113 | early_init_amd_mc(c); | ||
114 | |||
115 | /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ | ||
116 | if (c->x86_power & (1<<8)) | ||
117 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
118 | |||
119 | set_cpu_cap(c, X86_FEATURE_SYSCALL32); | ||
120 | } | ||
121 | |||
122 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | ||
123 | { | ||
124 | unsigned level; | ||
125 | |||
126 | #ifdef CONFIG_SMP | ||
127 | unsigned long value; | ||
128 | |||
129 | /* | ||
130 | * Disable TLB flush filter by setting HWCR.FFDIS on K8 | ||
131 | * bit 6 of msr C001_0015 | ||
132 | * | ||
133 | * Errata 63 for SH-B3 steppings | ||
134 | * Errata 122 for all steppings (F+ have it disabled by default) | ||
135 | */ | ||
136 | if (c->x86 == 0xf) { | ||
137 | rdmsrl(MSR_K8_HWCR, value); | ||
138 | value |= 1 << 6; | ||
139 | wrmsrl(MSR_K8_HWCR, value); | ||
140 | } | ||
141 | #endif | ||
142 | |||
143 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | ||
144 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | ||
145 | clear_cpu_cap(c, 0*32+31); | ||
146 | |||
147 | /* On C+ stepping K8 rep microcode works well for copy/memset */ | ||
148 | if (c->x86 == 0xf) { | ||
149 | level = cpuid_eax(1); | ||
150 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | ||
151 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
152 | } | ||
153 | if (c->x86 == 0x10 || c->x86 == 0x11) | ||
154 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
155 | |||
156 | /* Enable workaround for FXSAVE leak */ | ||
157 | if (c->x86 >= 6) | ||
158 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); | ||
159 | |||
160 | level = get_model_name(c); | ||
161 | if (!level) { | ||
162 | switch (c->x86) { | ||
163 | case 0xf: | ||
164 | /* Should distinguish Models here, but this is only | ||
165 | a fallback anyways. */ | ||
166 | strcpy(c->x86_model_id, "Hammer"); | ||
167 | break; | ||
168 | } | ||
169 | } | ||
170 | display_cacheinfo(c); | ||
171 | |||
172 | /* Multi core CPU? */ | ||
173 | if (c->extended_cpuid_level >= 0x80000008) | ||
174 | amd_detect_cmp(c); | ||
175 | |||
176 | if (c->extended_cpuid_level >= 0x80000006 && | ||
177 | (cpuid_edx(0x80000006) & 0xf000)) | ||
178 | num_cache_leaves = 4; | ||
179 | else | ||
180 | num_cache_leaves = 3; | ||
181 | |||
182 | if (c->x86 >= 0xf && c->x86 <= 0x11) | ||
183 | set_cpu_cap(c, X86_FEATURE_K8); | ||
184 | |||
185 | /* MFENCE stops RDTSC speculation */ | ||
186 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | ||
187 | |||
188 | if (c->x86 == 0x10) { | ||
189 | /* do this for boot cpu */ | ||
190 | if (c == &boot_cpu_data) | ||
191 | check_enable_amd_mmconf_dmi(); | ||
192 | |||
193 | fam10h_check_enable_mmcfg(); | ||
194 | } | ||
195 | |||
196 | if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) { | ||
197 | unsigned long long tseg; | ||
198 | |||
199 | /* | ||
200 | * Split up direct mapping around the TSEG SMM area. | ||
201 | * Don't do it for gbpages because there seems very little | ||
202 | * benefit in doing so. | ||
203 | */ | ||
204 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | ||
205 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | ||
206 | if ((tseg>>PMD_SHIFT) < | ||
207 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || | ||
208 | ((tseg>>PMD_SHIFT) < | ||
209 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && | ||
210 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) | ||
211 | set_memory_4k((unsigned long)__va(tseg), 1); | ||
212 | } | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static struct cpu_dev amd_cpu_dev __cpuinitdata = { | ||
217 | .c_vendor = "AMD", | ||
218 | .c_ident = { "AuthenticAMD" }, | ||
219 | .c_early_init = early_init_amd, | ||
220 | .c_init = init_amd, | ||
221 | }; | ||
222 | |||
223 | cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev); | ||
224 | |||
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index a0534c04d38a..89bfdd9cacc6 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -289,7 +289,6 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
289 | if (c->x86_model >= 6 && c->x86_model < 9) | 289 | if (c->x86_model >= 6 && c->x86_model < 9) |
290 | set_cpu_cap(c, X86_FEATURE_3DNOW); | 290 | set_cpu_cap(c, X86_FEATURE_3DNOW); |
291 | 291 | ||
292 | get_model_name(c); | ||
293 | display_cacheinfo(c); | 292 | display_cacheinfo(c); |
294 | } | 293 | } |
295 | 294 | ||
@@ -475,6 +474,7 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
475 | .c_early_init = early_init_centaur, | 474 | .c_early_init = early_init_centaur, |
476 | .c_init = init_centaur, | 475 | .c_init = init_centaur, |
477 | .c_size_cache = centaur_size_cache, | 476 | .c_size_cache = centaur_size_cache, |
477 | .c_x86_vendor = X86_VENDOR_CENTAUR, | ||
478 | }; | 478 | }; |
479 | 479 | ||
480 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); | 480 | cpu_dev_register(centaur_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index 1d181c40e2e1..a1625f5a1e78 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -16,9 +16,10 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
16 | 16 | ||
17 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 17 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
18 | { | 18 | { |
19 | early_init_centaur(c); | ||
20 | |||
19 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { | 21 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { |
20 | c->x86_cache_alignment = c->x86_clflush_size * 2; | 22 | c->x86_cache_alignment = c->x86_clflush_size * 2; |
21 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
22 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 23 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
23 | } | 24 | } |
24 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | 25 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
@@ -29,7 +30,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
29 | .c_ident = { "CentaurHauls" }, | 30 | .c_ident = { "CentaurHauls" }, |
30 | .c_early_init = early_init_centaur, | 31 | .c_early_init = early_init_centaur, |
31 | .c_init = init_centaur, | 32 | .c_init = init_centaur, |
33 | .c_x86_vendor = X86_VENDOR_CENTAUR, | ||
32 | }; | 34 | }; |
33 | 35 | ||
34 | cpu_vendor_dev_register(X86_VENDOR_CENTAUR, ¢aur_cpu_dev); | 36 | cpu_dev_register(centaur_cpu_dev); |
35 | 37 | ||
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/kernel/cpu/cmpxchg.c
new file mode 100644
index 000000000000..2056ccf572cc
--- /dev/null
+++ b/arch/x86/kernel/cpu/cmpxchg.c
@@ -0,0 +1,72 @@
1 | /* | ||
2 | * cmpxchg*() fallbacks for CPU not supporting these instructions | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/module.h> | ||
8 | |||
9 | #ifndef CONFIG_X86_CMPXCHG | ||
10 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
11 | { | ||
12 | u8 prev; | ||
13 | unsigned long flags; | ||
14 | |||
15 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
16 | local_irq_save(flags); | ||
17 | prev = *(u8 *)ptr; | ||
18 | if (prev == old) | ||
19 | *(u8 *)ptr = new; | ||
20 | local_irq_restore(flags); | ||
21 | return prev; | ||
22 | } | ||
23 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
24 | |||
25 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
26 | { | ||
27 | u16 prev; | ||
28 | unsigned long flags; | ||
29 | |||
30 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
31 | local_irq_save(flags); | ||
32 | prev = *(u16 *)ptr; | ||
33 | if (prev == old) | ||
34 | *(u16 *)ptr = new; | ||
35 | local_irq_restore(flags); | ||
36 | return prev; | ||
37 | } | ||
38 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
39 | |||
40 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
41 | { | ||
42 | u32 prev; | ||
43 | unsigned long flags; | ||
44 | |||
45 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
46 | local_irq_save(flags); | ||
47 | prev = *(u32 *)ptr; | ||
48 | if (prev == old) | ||
49 | *(u32 *)ptr = new; | ||
50 | local_irq_restore(flags); | ||
51 | return prev; | ||
52 | } | ||
53 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
54 | #endif | ||
55 | |||
56 | #ifndef CONFIG_X86_CMPXCHG64 | ||
57 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
58 | { | ||
59 | u64 prev; | ||
60 | unsigned long flags; | ||
61 | |||
62 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
63 | local_irq_save(flags); | ||
64 | prev = *(u64 *)ptr; | ||
65 | if (prev == old) | ||
66 | *(u64 *)ptr = new; | ||
67 | local_irq_restore(flags); | ||
68 | return prev; | ||
69 | } | ||
70 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
71 | #endif | ||
72 | |||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4e456bd955bb..25581dcb280e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,28 +1,62 @@
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/kernel.h> | ||
3 | #include <linux/sched.h> | ||
2 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/bootmem.h> | ||
6 | #include <linux/bitops.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/kgdb.h> | ||
9 | #include <linux/topology.h> | ||
3 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
4 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
5 | #include <linux/module.h> | ||
6 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
7 | #include <linux/bootmem.h> | ||
8 | #include <asm/processor.h> | ||
9 | #include <asm/i387.h> | 13 | #include <asm/i387.h> |
10 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
11 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | #include <asm/linkage.h> | ||
12 | #include <asm/mmu_context.h> | 17 | #include <asm/mmu_context.h> |
13 | #include <asm/mtrr.h> | 18 | #include <asm/mtrr.h> |
14 | #include <asm/mce.h> | 19 | #include <asm/mce.h> |
15 | #include <asm/pat.h> | 20 | #include <asm/pat.h> |
16 | #include <asm/asm.h> | 21 | #include <asm/asm.h> |
22 | #include <asm/numa.h> | ||
17 | #ifdef CONFIG_X86_LOCAL_APIC | 23 | #ifdef CONFIG_X86_LOCAL_APIC |
18 | #include <asm/mpspec.h> | 24 | #include <asm/mpspec.h> |
19 | #include <asm/apic.h> | 25 | #include <asm/apic.h> |
20 | #include <mach_apic.h> | 26 | #include <mach_apic.h> |
27 | #include <asm/genapic.h> | ||
21 | #endif | 28 | #endif |
22 | 29 | ||
30 | #include <asm/pda.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/processor.h> | ||
33 | #include <asm/desc.h> | ||
34 | #include <asm/atomic.h> | ||
35 | #include <asm/proto.h> | ||
36 | #include <asm/sections.h> | ||
37 | #include <asm/setup.h> | ||
38 | |||
23 | #include "cpu.h" | 39 | #include "cpu.h" |
24 | 40 | ||
41 | static struct cpu_dev *this_cpu __cpuinitdata; | ||
42 | |||
43 | #ifdef CONFIG_X86_64 | ||
44 | /* We need valid kernel segments for data and code in long mode too | ||
45 | * IRET will check the segment types kkeil 2000/10/28 | ||
46 | * Also sysret mandates a special GDT layout | ||
47 | */ | ||
48 | /* The TLS descriptors are currently at a different place compared to i386. | ||
49 | Hopefully nobody expects them at a fixed place (Wine?) */ | ||
25 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | 50 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { |
51 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | ||
52 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | ||
53 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | ||
54 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | ||
55 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | ||
56 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | ||
57 | } }; | ||
58 | #else | ||
59 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | ||
26 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 60 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, |
27 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 61 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, |
28 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 62 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, |
@@ -56,17 +90,157 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
56 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, | 90 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, |
57 | [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, | 91 | [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, |
58 | } }; | 92 | } }; |
93 | #endif | ||
59 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | 94 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
60 | 95 | ||
61 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | 96 | #ifdef CONFIG_X86_32 |
62 | |||
63 | static int cachesize_override __cpuinitdata = -1; | 97 | static int cachesize_override __cpuinitdata = -1; |
64 | static int disable_x86_serial_nr __cpuinitdata = 1; | 98 | static int disable_x86_serial_nr __cpuinitdata = 1; |
65 | 99 | ||
66 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 100 | static int __init cachesize_setup(char *str) |
101 | { | ||
102 | get_option(&str, &cachesize_override); | ||
103 | return 1; | ||
104 | } | ||
105 | __setup("cachesize=", cachesize_setup); | ||
106 | |||
107 | static int __init x86_fxsr_setup(char *s) | ||
108 | { | ||
109 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | ||
110 | setup_clear_cpu_cap(X86_FEATURE_XMM); | ||
111 | return 1; | ||
112 | } | ||
113 | __setup("nofxsr", x86_fxsr_setup); | ||
114 | |||
115 | static int __init x86_sep_setup(char *s) | ||
116 | { | ||
117 | setup_clear_cpu_cap(X86_FEATURE_SEP); | ||
118 | return 1; | ||
119 | } | ||
120 | __setup("nosep", x86_sep_setup); | ||
121 | |||
122 | /* Standard macro to see if a specific flag is changeable */ | ||
123 | static inline int flag_is_changeable_p(u32 flag) | ||
124 | { | ||
125 | u32 f1, f2; | ||
126 | |||
127 | /* | ||
128 | * Cyrix and IDT cpus allow disabling of CPUID | ||
129 | * so the code below may return different results | ||
130 | * when it is executed before and after enabling | ||
131 | * the CPUID. Add "volatile" to not allow gcc to | ||
132 | * optimize the subsequent calls to this function. | ||
133 | */ | ||
134 | asm volatile ("pushfl\n\t" | ||
135 | "pushfl\n\t" | ||
136 | "popl %0\n\t" | ||
137 | "movl %0,%1\n\t" | ||
138 | "xorl %2,%0\n\t" | ||
139 | "pushl %0\n\t" | ||
140 | "popfl\n\t" | ||
141 | "pushfl\n\t" | ||
142 | "popl %0\n\t" | ||
143 | "popfl\n\t" | ||
144 | : "=&r" (f1), "=&r" (f2) | ||
145 | : "ir" (flag)); | ||
146 | |||
147 | return ((f1^f2) & flag) != 0; | ||
148 | } | ||
149 | |||
150 | /* Probe for the CPUID instruction */ | ||
151 | static int __cpuinit have_cpuid_p(void) | ||
152 | { | ||
153 | return flag_is_changeable_p(X86_EFLAGS_ID); | ||
154 | } | ||
155 | |||
156 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | ||
157 | { | ||
158 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { | ||
159 | /* Disable processor serial number */ | ||
160 | unsigned long lo, hi; | ||
161 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
162 | lo |= 0x200000; | ||
163 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
164 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | ||
165 | clear_cpu_cap(c, X86_FEATURE_PN); | ||
166 | |||
167 | /* Disabling the serial number may affect the cpuid level */ | ||
168 | c->cpuid_level = cpuid_eax(0); | ||
169 | } | ||
170 | } | ||
171 | |||
172 | static int __init x86_serial_nr_setup(char *s) | ||
173 | { | ||
174 | disable_x86_serial_nr = 0; | ||
175 | return 1; | ||
176 | } | ||
177 | __setup("serialnumber", x86_serial_nr_setup); | ||
178 | #else | ||
179 | static inline int flag_is_changeable_p(u32 flag) | ||
180 | { | ||
181 | return 1; | ||
182 | } | ||
183 | /* Probe for the CPUID instruction */ | ||
184 | static inline int have_cpuid_p(void) | ||
185 | { | ||
186 | return 1; | ||
187 | } | ||
188 | static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | ||
189 | { | ||
190 | } | ||
191 | #endif | ||
192 | |||
193 | /* | ||
194 | * Naming convention should be: <Name> [(<Codename>)] | ||
195 | * This table only is used unless init_<vendor>() below doesn't set it; | ||
196 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
197 | * | ||
198 | */ | ||
199 | |||
200 | /* Look up CPU names by table lookup. */ | ||
201 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | ||
202 | { | ||
203 | struct cpu_model_info *info; | ||
204 | |||
205 | if (c->x86_model >= 16) | ||
206 | return NULL; /* Range check */ | ||
207 | |||
208 | if (!this_cpu) | ||
209 | return NULL; | ||
210 | |||
211 | info = this_cpu->c_models; | ||
212 | |||
213 | while (info && info->family) { | ||
214 | if (info->family == c->x86) | ||
215 | return info->model_names[c->x86_model]; | ||
216 | info++; | ||
217 | } | ||
218 | return NULL; /* Not found */ | ||
219 | } | ||
220 | |||
221 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
222 | |||
223 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
224 | * it's on the real one. */ | ||
225 | void switch_to_new_gdt(void) | ||
226 | { | ||
227 | struct desc_ptr gdt_descr; | ||
228 | |||
229 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
230 | gdt_descr.size = GDT_SIZE - 1; | ||
231 | load_gdt(&gdt_descr); | ||
232 | #ifdef CONFIG_X86_32 | ||
233 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | ||
234 | #endif | ||
235 | } | ||
236 | |||
237 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | ||
67 | 238 | ||
68 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 239 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
69 | { | 240 | { |
241 | #ifdef CONFIG_X86_64 | ||
242 | display_cacheinfo(c); | ||
243 | #else | ||
70 | /* Not much we can do here... */ | 244 | /* Not much we can do here... */ |
71 | /* Check if at least it has cpuid */ | 245 | /* Check if at least it has cpuid */ |
72 | if (c->cpuid_level == -1) { | 246 | if (c->cpuid_level == -1) { |
@@ -76,28 +250,22 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
76 | else if (c->x86 == 3) | 250 | else if (c->x86 == 3) |
77 | strcpy(c->x86_model_id, "386"); | 251 | strcpy(c->x86_model_id, "386"); |
78 | } | 252 | } |
253 | #endif | ||
79 | } | 254 | } |
80 | 255 | ||
81 | static struct cpu_dev __cpuinitdata default_cpu = { | 256 | static struct cpu_dev __cpuinitdata default_cpu = { |
82 | .c_init = default_init, | 257 | .c_init = default_init, |
83 | .c_vendor = "Unknown", | 258 | .c_vendor = "Unknown", |
259 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | ||
84 | }; | 260 | }; |
85 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
86 | |||
87 | static int __init cachesize_setup(char *str) | ||
88 | { | ||
89 | get_option(&str, &cachesize_override); | ||
90 | return 1; | ||
91 | } | ||
92 | __setup("cachesize=", cachesize_setup); | ||
93 | 261 | ||
94 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | 262 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) |
95 | { | 263 | { |
96 | unsigned int *v; | 264 | unsigned int *v; |
97 | char *p, *q; | 265 | char *p, *q; |
98 | 266 | ||
99 | if (cpuid_eax(0x80000000) < 0x80000004) | 267 | if (c->extended_cpuid_level < 0x80000004) |
100 | return 0; | 268 | return; |
101 | 269 | ||
102 | v = (unsigned int *) c->x86_model_id; | 270 | v = (unsigned int *) c->x86_model_id; |
103 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | 271 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); |
@@ -116,30 +284,34 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
116 | while (q <= &c->x86_model_id[48]) | 284 | while (q <= &c->x86_model_id[48]) |
117 | *q++ = '\0'; /* Zero-pad the rest */ | 285 | *q++ = '\0'; /* Zero-pad the rest */ |
118 | } | 286 | } |
119 | |||
120 | return 1; | ||
121 | } | 287 | } |
122 | 288 | ||
123 | |||
124 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | 289 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) |
125 | { | 290 | { |
126 | unsigned int n, dummy, ecx, edx, l2size; | 291 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
127 | 292 | ||
128 | n = cpuid_eax(0x80000000); | 293 | n = c->extended_cpuid_level; |
129 | 294 | ||
130 | if (n >= 0x80000005) { | 295 | if (n >= 0x80000005) { |
131 | cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); | 296 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
132 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", | 297 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n", |
133 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | 298 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); |
134 | c->x86_cache_size = (ecx>>24)+(edx>>24); | 299 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
300 | #ifdef CONFIG_X86_64 | ||
301 | /* On K8 L1 TLB is inclusive, so don't count it */ | ||
302 | c->x86_tlbsize = 0; | ||
303 | #endif | ||
135 | } | 304 | } |
136 | 305 | ||
137 | if (n < 0x80000006) /* Some chips just has a large L1. */ | 306 | if (n < 0x80000006) /* Some chips just has a large L1. */ |
138 | return; | 307 | return; |
139 | 308 | ||
140 | ecx = cpuid_ecx(0x80000006); | 309 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
141 | l2size = ecx >> 16; | 310 | l2size = ecx >> 16; |
142 | 311 | ||
312 | #ifdef CONFIG_X86_64 | ||
313 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
314 | #else | ||
143 | /* do processor-specific cache resizing */ | 315 | /* do processor-specific cache resizing */ |
144 | if (this_cpu->c_size_cache) | 316 | if (this_cpu->c_size_cache) |
145 | l2size = this_cpu->c_size_cache(c, l2size); | 317 | l2size = this_cpu->c_size_cache(c, l2size); |
@@ -150,116 +322,106 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
150 | 322 | ||
151 | if (l2size == 0) | 323 | if (l2size == 0) |
152 | return; /* Again, no L2 cache is possible */ | 324 | return; /* Again, no L2 cache is possible */ |
325 | #endif | ||
153 | 326 | ||
154 | c->x86_cache_size = l2size; | 327 | c->x86_cache_size = l2size; |
155 | 328 | ||
156 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | 329 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", |
157 | l2size, ecx & 0xFF); | 330 | l2size, ecx & 0xFF); |
158 | } | 331 | } |
159 | 332 | ||
160 | /* | 333 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
161 | * Naming convention should be: <Name> [(<Codename>)] | ||
162 | * This table only is used unless init_<vendor>() below doesn't set it; | ||
163 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | ||
164 | * | ||
165 | */ | ||
166 | |||
167 | /* Look up CPU names by table lookup. */ | ||
168 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | ||
169 | { | 334 | { |
170 | struct cpu_model_info *info; | 335 | #ifdef CONFIG_X86_HT |
336 | u32 eax, ebx, ecx, edx; | ||
337 | int index_msb, core_bits; | ||
171 | 338 | ||
172 | if (c->x86_model >= 16) | 339 | if (!cpu_has(c, X86_FEATURE_HT)) |
173 | return NULL; /* Range check */ | 340 | return; |
174 | 341 | ||
175 | if (!this_cpu) | 342 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
176 | return NULL; | 343 | goto out; |
177 | 344 | ||
178 | info = this_cpu->c_models; | 345 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) |
346 | return; | ||
179 | 347 | ||
180 | while (info && info->family) { | 348 | cpuid(1, &eax, &ebx, &ecx, &edx); |
181 | if (info->family == c->x86) | 349 | |
182 | return info->model_names[c->x86_model]; | 350 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
183 | info++; | 351 | |
352 | if (smp_num_siblings == 1) { | ||
353 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | ||
354 | } else if (smp_num_siblings > 1) { | ||
355 | |||
356 | if (smp_num_siblings > NR_CPUS) { | ||
357 | printk(KERN_WARNING "CPU: Unsupported number of siblings %d", | ||
358 | smp_num_siblings); | ||
359 | smp_num_siblings = 1; | ||
360 | return; | ||
361 | } | ||
362 | |||
363 | index_msb = get_count_order(smp_num_siblings); | ||
364 | #ifdef CONFIG_X86_64 | ||
365 | c->phys_proc_id = phys_pkg_id(index_msb); | ||
366 | #else | ||
367 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | ||
368 | #endif | ||
369 | |||
370 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
371 | |||
372 | index_msb = get_count_order(smp_num_siblings); | ||
373 | |||
374 | core_bits = get_count_order(c->x86_max_cores); | ||
375 | |||
376 | #ifdef CONFIG_X86_64 | ||
377 | c->cpu_core_id = phys_pkg_id(index_msb) & | ||
378 | ((1 << core_bits) - 1); | ||
379 | #else | ||
380 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | ||
381 | ((1 << core_bits) - 1); | ||
382 | #endif | ||
184 | } | 383 | } |
185 | return NULL; /* Not found */ | ||
186 | } | ||
187 | 384 | ||
385 | out: | ||
386 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | ||
387 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
388 | c->phys_proc_id); | ||
389 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
390 | c->cpu_core_id); | ||
391 | } | ||
392 | #endif | ||
393 | } | ||
188 | 394 | ||
189 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) | 395 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
190 | { | 396 | { |
191 | char *v = c->x86_vendor_id; | 397 | char *v = c->x86_vendor_id; |
192 | int i; | 398 | int i; |
193 | static int printed; | 399 | static int printed; |
194 | 400 | ||
195 | for (i = 0; i < X86_VENDOR_NUM; i++) { | 401 | for (i = 0; i < X86_VENDOR_NUM; i++) { |
196 | if (cpu_devs[i]) { | 402 | if (!cpu_devs[i]) |
197 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | 403 | break; |
198 | (cpu_devs[i]->c_ident[1] && | 404 | |
199 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | 405 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
200 | c->x86_vendor = i; | 406 | (cpu_devs[i]->c_ident[1] && |
201 | if (!early) | 407 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
202 | this_cpu = cpu_devs[i]; | 408 | this_cpu = cpu_devs[i]; |
203 | return; | 409 | c->x86_vendor = this_cpu->c_x86_vendor; |
204 | } | 410 | return; |
205 | } | 411 | } |
206 | } | 412 | } |
413 | |||
207 | if (!printed) { | 414 | if (!printed) { |
208 | printed++; | 415 | printed++; |
209 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | 416 | printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v); |
210 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | 417 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); |
211 | } | 418 | } |
419 | |||
212 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 420 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
213 | this_cpu = &default_cpu; | 421 | this_cpu = &default_cpu; |
214 | } | 422 | } |
215 | 423 | ||
216 | 424 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |
217 | static int __init x86_fxsr_setup(char *s) | ||
218 | { | ||
219 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | ||
220 | setup_clear_cpu_cap(X86_FEATURE_XMM); | ||
221 | return 1; | ||
222 | } | ||
223 | __setup("nofxsr", x86_fxsr_setup); | ||
224 | |||
225 | |||
226 | static int __init x86_sep_setup(char *s) | ||
227 | { | ||
228 | setup_clear_cpu_cap(X86_FEATURE_SEP); | ||
229 | return 1; | ||
230 | } | ||
231 | __setup("nosep", x86_sep_setup); | ||
232 | |||
233 | |||
234 | /* Standard macro to see if a specific flag is changeable */ | ||
235 | static inline int flag_is_changeable_p(u32 flag) | ||
236 | { | ||
237 | u32 f1, f2; | ||
238 | |||
239 | asm("pushfl\n\t" | ||
240 | "pushfl\n\t" | ||
241 | "popl %0\n\t" | ||
242 | "movl %0,%1\n\t" | ||
243 | "xorl %2,%0\n\t" | ||
244 | "pushl %0\n\t" | ||
245 | "popfl\n\t" | ||
246 | "pushfl\n\t" | ||
247 | "popl %0\n\t" | ||
248 | "popfl\n\t" | ||
249 | : "=&r" (f1), "=&r" (f2) | ||
250 | : "ir" (flag)); | ||
251 | |||
252 | return ((f1^f2) & flag) != 0; | ||
253 | } | ||
254 | |||
255 | |||
256 | /* Probe for the CPUID instruction */ | ||
257 | static int __cpuinit have_cpuid_p(void) | ||
258 | { | ||
259 | return flag_is_changeable_p(X86_EFLAGS_ID); | ||
260 | } | ||
261 | |||
262 | void __init cpu_detect(struct cpuinfo_x86 *c) | ||
263 | { | 425 | { |
264 | /* Get vendor name */ | 426 | /* Get vendor name */ |
265 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 427 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
@@ -268,50 +430,87 @@ void __init cpu_detect(struct cpuinfo_x86 *c) | |||
268 | (unsigned int *)&c->x86_vendor_id[4]); | 430 | (unsigned int *)&c->x86_vendor_id[4]); |
269 | 431 | ||
270 | c->x86 = 4; | 432 | c->x86 = 4; |
433 | /* Intel-defined flags: level 0x00000001 */ | ||
271 | if (c->cpuid_level >= 0x00000001) { | 434 | if (c->cpuid_level >= 0x00000001) { |
272 | u32 junk, tfms, cap0, misc; | 435 | u32 junk, tfms, cap0, misc; |
273 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); | 436 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
274 | c->x86 = (tfms >> 8) & 15; | 437 | c->x86 = (tfms >> 8) & 0xf; |
275 | c->x86_model = (tfms >> 4) & 15; | 438 | c->x86_model = (tfms >> 4) & 0xf; |
439 | c->x86_mask = tfms & 0xf; | ||
276 | if (c->x86 == 0xf) | 440 | if (c->x86 == 0xf) |
277 | c->x86 += (tfms >> 20) & 0xff; | 441 | c->x86 += (tfms >> 20) & 0xff; |
278 | if (c->x86 >= 0x6) | 442 | if (c->x86 >= 0x6) |
279 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | 443 | c->x86_model += ((tfms >> 16) & 0xf) << 4; |
280 | c->x86_mask = tfms & 15; | ||
281 | if (cap0 & (1<<19)) { | 444 | if (cap0 & (1<<19)) { |
282 | c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; | ||
283 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | 445 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
446 | c->x86_cache_alignment = c->x86_clflush_size; | ||
284 | } | 447 | } |
285 | } | 448 | } |
286 | } | 449 | } |
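cpu_detect() above unpacks family, model and stepping from the EAX value of CPUID leaf 1, folding in the extended family and model fields. A stand-alone sketch of that arithmetic on a made-up EAX value:

#include <stdio.h>

int main(void)
{
    unsigned int tfms = 0x000106fb;     /* hypothetical CPUID leaf 1 EAX */
    unsigned int family   = (tfms >> 8) & 0xf;
    unsigned int model    = (tfms >> 4) & 0xf;
    unsigned int stepping = tfms & 0xf;

    if (family == 0xf)                  /* extended family applies to family 15 only */
        family += (tfms >> 20) & 0xff;
    if (family >= 0x6)                  /* extended model applies from family 6 up */
        model += ((tfms >> 16) & 0xf) << 4;

    printf("family 0x%x model 0x%x stepping 0x%x\n", family, model, stepping);
    return 0;
}

With 0x000106fb this prints family 0x6 model 0x1f stepping 0xb.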
287 | static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | 450 | |
451 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | ||
288 | { | 452 | { |
289 | u32 tfms, xlvl; | 453 | u32 tfms, xlvl; |
290 | unsigned int ebx; | 454 | u32 ebx; |
291 | 455 | ||
292 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 456 | /* Intel-defined flags: level 0x00000001 */ |
293 | if (have_cpuid_p()) { | 457 | if (c->cpuid_level >= 0x00000001) { |
294 | /* Intel-defined flags: level 0x00000001 */ | 458 | u32 capability, excap; |
295 | if (c->cpuid_level >= 0x00000001) { | 459 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
296 | u32 capability, excap; | 460 | c->x86_capability[0] = capability; |
297 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 461 | c->x86_capability[4] = excap; |
298 | c->x86_capability[0] = capability; | 462 | } |
299 | c->x86_capability[4] = excap; | ||
300 | } | ||
301 | 463 | ||
302 | /* AMD-defined flags: level 0x80000001 */ | 464 | /* AMD-defined flags: level 0x80000001 */ |
303 | xlvl = cpuid_eax(0x80000000); | 465 | xlvl = cpuid_eax(0x80000000); |
304 | if ((xlvl & 0xffff0000) == 0x80000000) { | 466 | c->extended_cpuid_level = xlvl; |
305 | if (xlvl >= 0x80000001) { | 467 | if ((xlvl & 0xffff0000) == 0x80000000) { |
306 | c->x86_capability[1] = cpuid_edx(0x80000001); | 468 | if (xlvl >= 0x80000001) { |
307 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 469 | c->x86_capability[1] = cpuid_edx(0x80000001); |
308 | } | 470 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
309 | } | 471 | } |
472 | } | ||
473 | |||
474 | #ifdef CONFIG_X86_64 | ||
475 | if (c->extended_cpuid_level >= 0x80000008) { | ||
476 | u32 eax = cpuid_eax(0x80000008); | ||
310 | 477 | ||
478 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
479 | c->x86_phys_bits = eax & 0xff; | ||
311 | } | 480 | } |
481 | #endif | ||
482 | |||
483 | if (c->extended_cpuid_level >= 0x80000007) | ||
484 | c->x86_power = cpuid_edx(0x80000007); | ||
312 | 485 | ||
313 | } | 486 | } |
314 | 487 | ||
488 | static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | ||
489 | { | ||
490 | #ifdef CONFIG_X86_32 | ||
491 | int i; | ||
492 | |||
493 | /* | ||
494 | * First of all, decide if this is a 486 or higher | ||
495 | * It's a 486 if we can modify the AC flag | ||
496 | */ | ||
497 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | ||
498 | c->x86 = 4; | ||
499 | else | ||
500 | c->x86 = 3; | ||
501 | |||
502 | for (i = 0; i < X86_VENDOR_NUM; i++) | ||
503 | if (cpu_devs[i] && cpu_devs[i]->c_identify) { | ||
504 | c->x86_vendor_id[0] = 0; | ||
505 | cpu_devs[i]->c_identify(c); | ||
506 | if (c->x86_vendor_id[0]) { | ||
507 | get_cpu_vendor(c); | ||
508 | break; | ||
509 | } | ||
510 | } | ||
511 | #endif | ||
512 | } | ||
513 | |||
315 | /* | 514 | /* |
316 | * Do minimum CPU detection early. | 515 | * Do minimum CPU detection early. |
317 | * Fields really needed: vendor, cpuid_level, family, model, mask, | 516 | * Fields really needed: vendor, cpuid_level, family, model, mask, |
@@ -321,25 +520,61 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) | |||
321 | * WARNING: this function is only called on the BP. Don't add code here | 520 | * WARNING: this function is only called on the BP. Don't add code here |
322 | * that is supposed to run on all CPUs. | 521 | * that is supposed to run on all CPUs. |
323 | */ | 522 | */ |
324 | static void __init early_cpu_detect(void) | 523 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
325 | { | 524 | { |
326 | struct cpuinfo_x86 *c = &boot_cpu_data; | 525 | #ifdef CONFIG_X86_64 |
327 | 526 | c->x86_clflush_size = 64; | |
328 | c->x86_cache_alignment = 32; | 527 | #else |
329 | c->x86_clflush_size = 32; | 528 | c->x86_clflush_size = 32; |
529 | #endif | ||
530 | c->x86_cache_alignment = c->x86_clflush_size; | ||
531 | |||
532 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
533 | c->extended_cpuid_level = 0; | ||
534 | |||
535 | if (!have_cpuid_p()) | ||
536 | identify_cpu_without_cpuid(c); | ||
330 | 537 | ||
538 | /* cyrix could have cpuid enabled via c_identify()*/ | ||
331 | if (!have_cpuid_p()) | 539 | if (!have_cpuid_p()) |
332 | return; | 540 | return; |
333 | 541 | ||
334 | cpu_detect(c); | 542 | cpu_detect(c); |
335 | 543 | ||
336 | get_cpu_vendor(c, 1); | 544 | get_cpu_vendor(c); |
545 | |||
546 | get_cpu_cap(c); | ||
337 | 547 | ||
338 | early_get_cap(c); | 548 | if (this_cpu->c_early_init) |
549 | this_cpu->c_early_init(c); | ||
339 | 550 | ||
340 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 551 | validate_pat_support(c); |
341 | cpu_devs[c->x86_vendor]->c_early_init) | 552 | } |
342 | cpu_devs[c->x86_vendor]->c_early_init(c); | 553 | |
554 | void __init early_cpu_init(void) | ||
555 | { | ||
556 | struct cpu_dev **cdev; | ||
557 | int count = 0; | ||
558 | |||
559 | printk("KERNEL supported cpus:\n"); | ||
560 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | ||
561 | struct cpu_dev *cpudev = *cdev; | ||
562 | unsigned int j; | ||
563 | |||
564 | if (count >= X86_VENDOR_NUM) | ||
565 | break; | ||
566 | cpu_devs[count] = cpudev; | ||
567 | count++; | ||
568 | |||
569 | for (j = 0; j < 2; j++) { | ||
570 | if (!cpudev->c_ident[j]) | ||
571 | continue; | ||
572 | printk(" %s %s\n", cpudev->c_vendor, | ||
573 | cpudev->c_ident[j]); | ||
574 | } | ||
575 | } | ||
576 | |||
577 | early_identify_cpu(&boot_cpu_data); | ||
343 | } | 578 | } |
344 | 579 | ||
345 | /* | 580 | /* |
@@ -357,86 +592,41 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | |||
357 | 592 | ||
358 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 593 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
359 | { | 594 | { |
360 | u32 tfms, xlvl; | 595 | c->extended_cpuid_level = 0; |
361 | unsigned int ebx; | ||
362 | |||
363 | if (have_cpuid_p()) { | ||
364 | /* Get vendor name */ | ||
365 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | ||
366 | (unsigned int *)&c->x86_vendor_id[0], | ||
367 | (unsigned int *)&c->x86_vendor_id[8], | ||
368 | (unsigned int *)&c->x86_vendor_id[4]); | ||
369 | |||
370 | get_cpu_vendor(c, 0); | ||
371 | /* Initialize the standard set of capabilities */ | ||
372 | /* Note that the vendor-specific code below might override */ | ||
373 | /* Intel-defined flags: level 0x00000001 */ | ||
374 | if (c->cpuid_level >= 0x00000001) { | ||
375 | u32 capability, excap; | ||
376 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | ||
377 | c->x86_capability[0] = capability; | ||
378 | c->x86_capability[4] = excap; | ||
379 | c->x86 = (tfms >> 8) & 15; | ||
380 | c->x86_model = (tfms >> 4) & 15; | ||
381 | if (c->x86 == 0xf) | ||
382 | c->x86 += (tfms >> 20) & 0xff; | ||
383 | if (c->x86 >= 0x6) | ||
384 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
385 | c->x86_mask = tfms & 15; | ||
386 | c->initial_apicid = (ebx >> 24) & 0xFF; | ||
387 | #ifdef CONFIG_X86_HT | ||
388 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | ||
389 | c->phys_proc_id = c->initial_apicid; | ||
390 | #else | ||
391 | c->apicid = c->initial_apicid; | ||
392 | #endif | ||
393 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | ||
394 | c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8; | ||
395 | } else { | ||
396 | /* Have CPUID level 0 only - unheard of */ | ||
397 | c->x86 = 4; | ||
398 | } | ||
399 | 596 | ||
400 | /* AMD-defined flags: level 0x80000001 */ | 597 | if (!have_cpuid_p()) |
401 | xlvl = cpuid_eax(0x80000000); | 598 | identify_cpu_without_cpuid(c); |
402 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
403 | if (xlvl >= 0x80000001) { | ||
404 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
405 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
406 | } | ||
407 | if (xlvl >= 0x80000004) | ||
408 | get_model_name(c); /* Default name */ | ||
409 | } | ||
410 | 599 | ||
411 | init_scattered_cpuid_features(c); | 600 | /* cyrix could have cpuid enabled via c_identify()*/ |
412 | detect_nopl(c); | 601 | if (!have_cpuid_p()) |
413 | } | 602 | return; |
414 | } | ||
415 | 603 | ||
416 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 604 | cpu_detect(c); |
417 | { | ||
418 | if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) { | ||
419 | /* Disable processor serial number */ | ||
420 | unsigned long lo, hi; | ||
421 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
422 | lo |= 0x200000; | ||
423 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | ||
424 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | ||
425 | clear_cpu_cap(c, X86_FEATURE_PN); | ||
426 | 605 | ||
427 | /* Disabling the serial number may affect the cpuid level */ | 606 | get_cpu_vendor(c); |
428 | c->cpuid_level = cpuid_eax(0); | ||
429 | } | ||
430 | } | ||
431 | 607 | ||
432 | static int __init x86_serial_nr_setup(char *s) | 608 | get_cpu_cap(c); |
433 | { | 609 | |
434 | disable_x86_serial_nr = 0; | 610 | if (c->cpuid_level >= 0x00000001) { |
435 | return 1; | 611 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; |
436 | } | 612 | #ifdef CONFIG_X86_32 |
437 | __setup("serialnumber", x86_serial_nr_setup); | 613 | # ifdef CONFIG_X86_HT |
614 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | ||
615 | # else | ||
616 | c->apicid = c->initial_apicid; | ||
617 | # endif | ||
618 | #endif | ||
438 | 619 | ||
620 | #ifdef CONFIG_X86_HT | ||
621 | c->phys_proc_id = c->initial_apicid; | ||
622 | #endif | ||
623 | } | ||
439 | 624 | ||
625 | get_model_name(c); /* Default name */ | ||
626 | |||
627 | init_scattered_cpuid_features(c); | ||
628 | detect_nopl(c); | ||
629 | } | ||
440 | 630 | ||
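generic_identify() above records the initial APIC ID from bits 31..24 of CPUID leaf 1 EBX; detect_ht() earlier in the file then slices that ID into a physical package ID and a core ID using the sibling and core counts. A stand-alone sketch of the bit-slicing with invented counts and APIC ID (get_count_order() is re-implemented locally as a ceil(log2)):

#include <stdio.h>

/* same idea as the kernel's get_count_order(): smallest order with 2^order >= n */
static int count_order(unsigned int n)
{
    int order = 0;

    while ((1u << order) < n)
        order++;
    return order;
}

int main(void)
{
    unsigned int apicid    = 0x5;   /* hypothetical initial APIC ID          */
    unsigned int siblings  = 2;     /* logical CPUs per package (leaf 1 EBX) */
    unsigned int max_cores = 2;     /* cores per package                     */

    int index_msb = count_order(siblings);
    unsigned int phys_proc_id = apicid >> index_msb;

    unsigned int threads_per_core = siblings / max_cores;
    int core_bits = count_order(max_cores);
    unsigned int cpu_core_id =
        (apicid >> count_order(threads_per_core)) & ((1 << core_bits) - 1);

    printf("package %u core %u\n", phys_proc_id, cpu_core_id);
    return 0;
}

With these example values the sketch reports package 2, core 1: bit 0 of the APIC ID selects the core and the remaining bits the package.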
441 | /* | 631 | /* |
442 | * This does the hard work of actually picking apart the CPU stuff... | 632 | * This does the hard work of actually picking apart the CPU stuff... |
@@ -448,30 +638,29 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
448 | c->loops_per_jiffy = loops_per_jiffy; | 638 | c->loops_per_jiffy = loops_per_jiffy; |
449 | c->x86_cache_size = -1; | 639 | c->x86_cache_size = -1; |
450 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 640 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
451 | c->cpuid_level = -1; /* CPUID not detected */ | ||
452 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | 641 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ |
453 | c->x86_vendor_id[0] = '\0'; /* Unset */ | 642 | c->x86_vendor_id[0] = '\0'; /* Unset */ |
454 | c->x86_model_id[0] = '\0'; /* Unset */ | 643 | c->x86_model_id[0] = '\0'; /* Unset */ |
455 | c->x86_max_cores = 1; | 644 | c->x86_max_cores = 1; |
645 | c->x86_coreid_bits = 0; | ||
646 | #ifdef CONFIG_X86_64 | ||
647 | c->x86_clflush_size = 64; | ||
648 | #else | ||
649 | c->cpuid_level = -1; /* CPUID not detected */ | ||
456 | c->x86_clflush_size = 32; | 650 | c->x86_clflush_size = 32; |
651 | #endif | ||
652 | c->x86_cache_alignment = c->x86_clflush_size; | ||
457 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 653 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
458 | 654 | ||
459 | if (!have_cpuid_p()) { | ||
460 | /* | ||
461 | * First of all, decide if this is a 486 or higher | ||
462 | * It's a 486 if we can modify the AC flag | ||
463 | */ | ||
464 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | ||
465 | c->x86 = 4; | ||
466 | else | ||
467 | c->x86 = 3; | ||
468 | } | ||
469 | |||
470 | generic_identify(c); | 655 | generic_identify(c); |
471 | 656 | ||
472 | if (this_cpu->c_identify) | 657 | if (this_cpu->c_identify) |
473 | this_cpu->c_identify(c); | 658 | this_cpu->c_identify(c); |
474 | 659 | ||
660 | #ifdef CONFIG_X86_64 | ||
661 | c->apicid = phys_pkg_id(0); | ||
662 | #endif | ||
663 | |||
475 | /* | 664 | /* |
476 | * Vendor-specific initialization. In this section we | 665 | * Vendor-specific initialization. In this section we |
477 | * canonicalize the feature flags, meaning if there are | 666 | * canonicalize the feature flags, meaning if there are |
@@ -505,6 +694,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
505 | c->x86, c->x86_model); | 694 | c->x86, c->x86_model); |
506 | } | 695 | } |
507 | 696 | ||
697 | #ifdef CONFIG_X86_64 | ||
698 | detect_ht(c); | ||
699 | #endif | ||
700 | |||
508 | /* | 701 | /* |
509 | * On SMP, boot_cpu_data holds the common feature set between | 702 | * On SMP, boot_cpu_data holds the common feature set between |
510 | * all CPUs; so make sure that we indicate which features are | 703 | * all CPUs; so make sure that we indicate which features are |
@@ -513,7 +706,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
513 | */ | 706 | */ |
514 | if (c != &boot_cpu_data) { | 707 | if (c != &boot_cpu_data) { |
515 | /* AND the already accumulated flags with these */ | 708 | /* AND the already accumulated flags with these */ |
516 | for (i = 0 ; i < NCAPINTS ; i++) | 709 | for (i = 0; i < NCAPINTS; i++) |
517 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 710 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
518 | } | 711 | } |
519 | 712 | ||
@@ -521,72 +714,91 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
521 | for (i = 0; i < NCAPINTS; i++) | 714 | for (i = 0; i < NCAPINTS; i++) |
522 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; | 715 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; |
523 | 716 | ||
717 | #ifdef CONFIG_X86_MCE | ||
524 | /* Init Machine Check Exception if available. */ | 718 | /* Init Machine Check Exception if available. */ |
525 | mcheck_init(c); | 719 | mcheck_init(c); |
720 | #endif | ||
526 | 721 | ||
527 | select_idle_routine(c); | 722 | select_idle_routine(c); |
723 | |||
724 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | ||
725 | numa_add_cpu(smp_processor_id()); | ||
726 | #endif | ||
528 | } | 727 | } |
529 | 728 | ||
729 | #ifdef CONFIG_X86_64 | ||
730 | static void vgetcpu_set_mode(void) | ||
731 | { | ||
732 | if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) | ||
733 | vgetcpu_mode = VGETCPU_RDTSCP; | ||
734 | else | ||
735 | vgetcpu_mode = VGETCPU_LSL; | ||
736 | } | ||
737 | #endif | ||
738 | |||
530 | void __init identify_boot_cpu(void) | 739 | void __init identify_boot_cpu(void) |
531 | { | 740 | { |
532 | identify_cpu(&boot_cpu_data); | 741 | identify_cpu(&boot_cpu_data); |
742 | #ifdef CONFIG_X86_32 | ||
533 | sysenter_setup(); | 743 | sysenter_setup(); |
534 | enable_sep_cpu(); | 744 | enable_sep_cpu(); |
745 | #else | ||
746 | vgetcpu_set_mode(); | ||
747 | #endif | ||
535 | } | 748 | } |
536 | 749 | ||
537 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 750 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) |
538 | { | 751 | { |
539 | BUG_ON(c == &boot_cpu_data); | 752 | BUG_ON(c == &boot_cpu_data); |
540 | identify_cpu(c); | 753 | identify_cpu(c); |
754 | #ifdef CONFIG_X86_32 | ||
541 | enable_sep_cpu(); | 755 | enable_sep_cpu(); |
756 | #endif | ||
542 | mtrr_ap_init(); | 757 | mtrr_ap_init(); |
543 | } | 758 | } |
544 | 759 | ||
545 | #ifdef CONFIG_X86_HT | 760 | struct msr_range { |
546 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 761 | unsigned min; |
547 | { | 762 | unsigned max; |
548 | u32 eax, ebx, ecx, edx; | 763 | }; |
549 | int index_msb, core_bits; | ||
550 | |||
551 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
552 | |||
553 | if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) | ||
554 | return; | ||
555 | |||
556 | smp_num_siblings = (ebx & 0xff0000) >> 16; | ||
557 | 764 | ||
558 | if (smp_num_siblings == 1) { | 765 | static struct msr_range msr_range_array[] __cpuinitdata = { |
559 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | 766 | { 0x00000000, 0x00000418}, |
560 | } else if (smp_num_siblings > 1) { | 767 | { 0xc0000000, 0xc000040b}, |
768 | { 0xc0010000, 0xc0010142}, | ||
769 | { 0xc0011000, 0xc001103b}, | ||
770 | }; | ||
561 | 771 | ||
562 | if (smp_num_siblings > NR_CPUS) { | 772 | static void __cpuinit print_cpu_msr(void) |
563 | printk(KERN_WARNING "CPU: Unsupported number of the " | 773 | { |
564 | "siblings %d", smp_num_siblings); | 774 | unsigned index; |
565 | smp_num_siblings = 1; | 775 | u64 val; |
566 | return; | 776 | int i; |
777 | unsigned index_min, index_max; | ||
778 | |||
779 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | ||
780 | index_min = msr_range_array[i].min; | ||
781 | index_max = msr_range_array[i].max; | ||
782 | for (index = index_min; index < index_max; index++) { | ||
783 | if (rdmsrl_amd_safe(index, &val)) | ||
784 | continue; | ||
785 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | ||
567 | } | 786 | } |
787 | } | ||
788 | } | ||
568 | 789 | ||
569 | index_msb = get_count_order(smp_num_siblings); | 790 | static int show_msr __cpuinitdata; |
570 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | 791 | static __init int setup_show_msr(char *arg) |
571 | 792 | { | |
572 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 793 | int num; |
573 | c->phys_proc_id); | ||
574 | |||
575 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
576 | |||
577 | index_msb = get_count_order(smp_num_siblings) ; | ||
578 | |||
579 | core_bits = get_count_order(c->x86_max_cores); | ||
580 | 794 | ||
581 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | 795 | get_option(&arg, &num); |
582 | ((1 << core_bits) - 1); | ||
583 | 796 | ||
584 | if (c->x86_max_cores > 1) | 797 | if (num > 0) |
585 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 798 | show_msr = num; |
586 | c->cpu_core_id); | 799 | return 1; |
587 | } | ||
588 | } | 800 | } |
589 | #endif | 801 | __setup("show_msr=", setup_show_msr); |
590 | 802 | ||
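The new show_msr= option above makes the boot code dump several MSR ranges via rdmsrl_amd_safe() for CPUs whose index is below the given value. For comparison, the same registers can be inspected from userspace through the msr driver, which uses the file offset as the MSR index; a rough sketch (assumes root and a loaded msr module):

#define _FILE_OFFSET_BITS 64
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    /* first register of each range used above, purely for illustration */
    uint32_t regs[] = { 0x00000000, 0xc0000000, 0xc0010000, 0xc0011000 };
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
        uint64_t val;

        /* reading 8 bytes at offset N returns the value of MSR N */
        if (pread(fd, &val, sizeof(val), regs[i]) == (ssize_t)sizeof(val))
            printf(" MSR%08x: %016llx\n", regs[i], (unsigned long long)val);
    }
    close(fd);
    return 0;
}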
591 | static __init int setup_noclflush(char *arg) | 803 | static __init int setup_noclflush(char *arg) |
592 | { | 804 | { |
@@ -604,18 +816,26 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
604 | else if (c->cpuid_level >= 0) | 816 | else if (c->cpuid_level >= 0) |
605 | vendor = c->x86_vendor_id; | 817 | vendor = c->x86_vendor_id; |
606 | 818 | ||
607 | if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor))) | 819 | if (vendor && !strstr(c->x86_model_id, vendor)) |
608 | printk("%s ", vendor); | 820 | printk(KERN_CONT "%s ", vendor); |
609 | 821 | ||
610 | if (!c->x86_model_id[0]) | 822 | if (c->x86_model_id[0]) |
611 | printk("%d86", c->x86); | 823 | printk(KERN_CONT "%s", c->x86_model_id); |
612 | else | 824 | else |
613 | printk("%s", c->x86_model_id); | 825 | printk(KERN_CONT "%d86", c->x86); |
614 | 826 | ||
615 | if (c->x86_mask || c->cpuid_level >= 0) | 827 | if (c->x86_mask || c->cpuid_level >= 0) |
616 | printk(" stepping %02x\n", c->x86_mask); | 828 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
617 | else | 829 | else |
618 | printk("\n"); | 830 | printk(KERN_CONT "\n"); |
831 | |||
832 | #ifdef CONFIG_SMP | ||
833 | if (c->cpu_index < show_msr) | ||
834 | print_cpu_msr(); | ||
835 | #else | ||
836 | if (show_msr) | ||
837 | print_cpu_msr(); | ||
838 | #endif | ||
619 | } | 839 | } |
620 | 840 | ||
621 | static __init int setup_disablecpuid(char *arg) | 841 | static __init int setup_disablecpuid(char *arg) |
@@ -631,19 +851,89 @@ __setup("clearcpuid=", setup_disablecpuid); | |||
631 | 851 | ||
632 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | 852 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; |
633 | 853 | ||
634 | void __init early_cpu_init(void) | 854 | #ifdef CONFIG_X86_64 |
855 | struct x8664_pda **_cpu_pda __read_mostly; | ||
856 | EXPORT_SYMBOL(_cpu_pda); | ||
857 | |||
858 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | ||
859 | |||
860 | char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; | ||
861 | |||
862 | void __cpuinit pda_init(int cpu) | ||
635 | { | 863 | { |
636 | struct cpu_vendor_dev *cvdev; | 864 | struct x8664_pda *pda = cpu_pda(cpu); |
865 | |||
866 | /* Set up data that may be needed in __get_free_pages early */ ||
867 | loadsegment(fs, 0); | ||
868 | loadsegment(gs, 0); | ||
869 | /* Memory clobbers used to order PDA accesses */ ||
870 | mb(); | ||
871 | wrmsrl(MSR_GS_BASE, pda); | ||
872 | mb(); | ||
873 | |||
874 | pda->cpunumber = cpu; | ||
875 | pda->irqcount = -1; | ||
876 | pda->kernelstack = (unsigned long)stack_thread_info() - | ||
877 | PDA_STACKOFFSET + THREAD_SIZE; | ||
878 | pda->active_mm = &init_mm; | ||
879 | pda->mmu_state = 0; | ||
880 | |||
881 | if (cpu == 0) { | ||
882 | /* others are initialized in smpboot.c */ | ||
883 | pda->pcurrent = &init_task; | ||
884 | pda->irqstackptr = boot_cpu_stack; | ||
885 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
886 | } else { | ||
887 | if (!pda->irqstackptr) { | ||
888 | pda->irqstackptr = (char *) | ||
889 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | ||
890 | if (!pda->irqstackptr) | ||
891 | panic("cannot allocate irqstack for cpu %d", | ||
892 | cpu); | ||
893 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
894 | } | ||
895 | |||
896 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | ||
897 | pda->nodenumber = cpu_to_node(cpu); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | ||
902 | DEBUG_STKSZ] __page_aligned_bss; | ||
637 | 903 | ||
638 | for (cvdev = __x86cpuvendor_start ; | 904 | extern asmlinkage void ignore_sysret(void); |
639 | cvdev < __x86cpuvendor_end ; | ||
640 | cvdev++) | ||
641 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
642 | 905 | ||
643 | early_cpu_detect(); | 906 | /* May not be marked __init: used by software suspend */ |
644 | validate_pat_support(&boot_cpu_data); | 907 | void syscall_init(void) |
908 | { | ||
909 | /* | ||
910 | * LSTAR and STAR live in a somewhat strange symbiosis. ||
911 | * They both write to the same internal register. STAR allows setting ||
912 | * CS/DS, but only to a 32-bit target. LSTAR sets the 64-bit rip. ||
913 | */ | ||
914 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | ||
915 | wrmsrl(MSR_LSTAR, system_call); | ||
916 | wrmsrl(MSR_CSTAR, ignore_sysret); | ||
917 | |||
918 | #ifdef CONFIG_IA32_EMULATION | ||
919 | syscall32_cpu_init(); | ||
920 | #endif | ||
921 | |||
922 | /* Flags to clear on syscall */ | ||
923 | wrmsrl(MSR_SYSCALL_MASK, | ||
924 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); | ||
645 | } | 925 | } |
646 | 926 | ||
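syscall_init() above programs the 64-bit SYSCALL/SYSRET machinery: MSR_STAR carries the segment selector bases (bits 47..32 for SYSCALL, bits 63..48 for SYSRET), MSR_LSTAR holds the 64-bit entry point, and MSR_SYSCALL_MASK lists the RFLAGS bits to clear on entry. A tiny sketch of how the STAR value is composed, with example selector values standing in for __KERNEL_CS and __USER32_CS:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t kernel_cs = 0x10;  /* example __KERNEL_CS selector */
    uint16_t user32_cs = 0x23;  /* example __USER32_CS selector */

    /* bits 63..48: SYSRET CS/SS base, bits 47..32: SYSCALL CS/SS base */
    uint64_t star = ((uint64_t)user32_cs << 48) | ((uint64_t)kernel_cs << 32);

    printf("MSR_STAR = %016llx\n", (unsigned long long)star);
    return 0;
}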
927 | unsigned long kernel_eflags; | ||
928 | |||
929 | /* | ||
930 | * Copies of the original ist values from the tss are only accessed during | ||
931 | * debugging, no special alignment required. | ||
932 | */ | ||
933 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | ||
934 | |||
935 | #else | ||
936 | |||
647 | /* Make sure %fs is initialized properly in idle threads */ | 937 | /* Make sure %fs is initialized properly in idle threads */ |
648 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | 938 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) |
649 | { | 939 | { |
@@ -651,25 +941,136 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | |||
651 | regs->fs = __KERNEL_PERCPU; | 941 | regs->fs = __KERNEL_PERCPU; |
652 | return regs; | 942 | return regs; |
653 | } | 943 | } |
654 | 944 | #endif | |
655 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
656 | * it's on the real one. */ | ||
657 | void switch_to_new_gdt(void) | ||
658 | { | ||
659 | struct desc_ptr gdt_descr; | ||
660 | |||
661 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
662 | gdt_descr.size = GDT_SIZE - 1; | ||
663 | load_gdt(&gdt_descr); | ||
664 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | ||
665 | } | ||
666 | 945 | ||
667 | /* | 946 | /* |
668 | * cpu_init() initializes state that is per-CPU. Some data is already | 947 | * cpu_init() initializes state that is per-CPU. Some data is already |
669 | * initialized (naturally) in the bootstrap process, such as the GDT | 948 | * initialized (naturally) in the bootstrap process, such as the GDT |
670 | * and IDT. We reload them nevertheless, this function acts as a | 949 | * and IDT. We reload them nevertheless, this function acts as a |
671 | * 'CPU state barrier', nothing should get across. | 950 | * 'CPU state barrier', nothing should get across. |
951 | * A lot of state is already set up in PDA init for 64 bit | ||
672 | */ | 952 | */ |
953 | #ifdef CONFIG_X86_64 | ||
954 | void __cpuinit cpu_init(void) | ||
955 | { | ||
956 | int cpu = stack_smp_processor_id(); | ||
957 | struct tss_struct *t = &per_cpu(init_tss, cpu); | ||
958 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); | ||
959 | unsigned long v; | ||
960 | char *estacks = NULL; | ||
961 | struct task_struct *me; | ||
962 | int i; | ||
963 | |||
964 | /* CPU 0 is initialised in head64.c */ | ||
965 | if (cpu != 0) | ||
966 | pda_init(cpu); | ||
967 | else | ||
968 | estacks = boot_exception_stacks; | ||
969 | |||
970 | me = current; | ||
971 | |||
972 | if (cpu_test_and_set(cpu, cpu_initialized)) | ||
973 | panic("CPU#%d already initialized!\n", cpu); | ||
974 | |||
975 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | ||
976 | |||
977 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | ||
978 | |||
979 | /* | ||
980 | * Initialize the per-CPU GDT with the boot GDT, | ||
981 | * and set up the GDT descriptor: | ||
982 | */ | ||
983 | |||
984 | switch_to_new_gdt(); | ||
985 | load_idt((const struct desc_ptr *)&idt_descr); | ||
986 | |||
987 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | ||
988 | syscall_init(); | ||
989 | |||
990 | wrmsrl(MSR_FS_BASE, 0); | ||
991 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | ||
992 | barrier(); | ||
993 | |||
994 | check_efer(); | ||
995 | if (cpu != 0 && x2apic) | ||
996 | enable_x2apic(); | ||
997 | |||
998 | /* | ||
999 | * set up and load the per-CPU TSS | ||
1000 | */ | ||
1001 | if (!orig_ist->ist[0]) { | ||
1002 | static const unsigned int order[N_EXCEPTION_STACKS] = { | ||
1003 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | ||
1004 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | ||
1005 | }; | ||
1006 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | ||
1007 | if (cpu) { | ||
1008 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
1009 | if (!estacks) | ||
1010 | panic("Cannot allocate exception " | ||
1011 | "stack %ld %d\n", v, cpu); | ||
1012 | } | ||
1013 | estacks += PAGE_SIZE << order[v]; | ||
1014 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
1015 | (unsigned long)estacks; | ||
1016 | } | ||
1017 | } | ||
1018 | |||
1019 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | ||
1020 | /* | ||
1021 | * <= is required because the CPU will access up to | ||
1022 | * 8 bits beyond the end of the IO permission bitmap. | ||
1023 | */ | ||
1024 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | ||
1025 | t->io_bitmap[i] = ~0UL; | ||
1026 | |||
1027 | atomic_inc(&init_mm.mm_count); | ||
1028 | me->active_mm = &init_mm; | ||
1029 | if (me->mm) | ||
1030 | BUG(); | ||
1031 | enter_lazy_tlb(&init_mm, me); | ||
1032 | |||
1033 | load_sp0(t, &current->thread); ||
1034 | set_tss_desc(cpu, t); | ||
1035 | load_TR_desc(); | ||
1036 | load_LDT(&init_mm.context); | ||
1037 | |||
1038 | #ifdef CONFIG_KGDB | ||
1039 | /* | ||
1040 | * If the kgdb is connected no debug regs should be altered. This | ||
1041 | * is only applicable when KGDB and a KGDB I/O module are built | ||
1042 | * into the kernel and you are using early debugging with | ||
1043 | * kgdbwait. KGDB will control the kernel HW breakpoint registers. | ||
1044 | */ | ||
1045 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | ||
1046 | arch_kgdb_ops.correct_hw_break(); | ||
1047 | else { | ||
1048 | #endif | ||
1049 | /* | ||
1050 | * Clear all 6 debug registers: | ||
1051 | */ | ||
1052 | |||
1053 | set_debugreg(0UL, 0); | ||
1054 | set_debugreg(0UL, 1); | ||
1055 | set_debugreg(0UL, 2); | ||
1056 | set_debugreg(0UL, 3); | ||
1057 | set_debugreg(0UL, 6); | ||
1058 | set_debugreg(0UL, 7); | ||
1059 | #ifdef CONFIG_KGDB | ||
1060 | /* If the kgdb is connected no debug regs should be altered. */ | ||
1061 | } | ||
1062 | #endif | ||
1063 | |||
1064 | fpu_init(); | ||
1065 | |||
1066 | raw_local_save_flags(kernel_eflags); | ||
1067 | |||
1068 | if (is_uv_system()) | ||
1069 | uv_cpu_init(); | ||
1070 | } | ||
1071 | |||
1072 | #else | ||
1073 | |||
673 | void __cpuinit cpu_init(void) | 1074 | void __cpuinit cpu_init(void) |
674 | { | 1075 | { |
675 | int cpu = smp_processor_id(); | 1076 | int cpu = smp_processor_id(); |
@@ -723,19 +1124,21 @@ void __cpuinit cpu_init(void) | |||
723 | /* | 1124 | /* |
724 | * Force FPU initialization: | 1125 | * Force FPU initialization: |
725 | */ | 1126 | */ |
726 | current_thread_info()->status = 0; | 1127 | if (cpu_has_xsave) |
1128 | current_thread_info()->status = TS_XSAVE; | ||
1129 | else | ||
1130 | current_thread_info()->status = 0; | ||
727 | clear_used_math(); | 1131 | clear_used_math(); |
728 | mxcsr_feature_mask_init(); | 1132 | mxcsr_feature_mask_init(); |
729 | } | ||
730 | 1133 | ||
731 | #ifdef CONFIG_HOTPLUG_CPU | 1134 | /* |
732 | void __cpuinit cpu_uninit(void) | 1135 | * Boot processor to setup the FP and extended state context info. |
733 | { | 1136 | */ |
734 | int cpu = raw_smp_processor_id(); | 1137 | if (!smp_processor_id()) |
735 | cpu_clear(cpu, cpu_initialized); | 1138 | init_thread_xstate(); |
736 | 1139 | ||
737 | /* lazy TLB state */ | 1140 | xsave_init(); |
738 | per_cpu(cpu_tlbstate, cpu).state = 0; | ||
739 | per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; | ||
740 | } | 1141 | } |
1142 | |||
1143 | |||
741 | #endif | 1144 | #endif |
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c deleted file mode 100644 index a11f5d4477cd..000000000000 --- a/arch/x86/kernel/cpu/common_64.c +++ /dev/null | |||
@@ -1,712 +0,0 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/string.h> | ||
5 | #include <linux/bootmem.h> | ||
6 | #include <linux/bitops.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/kgdb.h> | ||
9 | #include <linux/topology.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <asm/i387.h> | ||
14 | #include <asm/msr.h> | ||
15 | #include <asm/io.h> | ||
16 | #include <asm/linkage.h> | ||
17 | #include <asm/mmu_context.h> | ||
18 | #include <asm/mtrr.h> | ||
19 | #include <asm/mce.h> | ||
20 | #include <asm/pat.h> | ||
21 | #include <asm/asm.h> | ||
22 | #include <asm/numa.h> | ||
23 | #ifdef CONFIG_X86_LOCAL_APIC | ||
24 | #include <asm/mpspec.h> | ||
25 | #include <asm/apic.h> | ||
26 | #include <mach_apic.h> | ||
27 | #endif | ||
28 | #include <asm/pda.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/desc.h> | ||
32 | #include <asm/atomic.h> | ||
33 | #include <asm/proto.h> | ||
34 | #include <asm/sections.h> | ||
35 | #include <asm/setup.h> | ||
36 | #include <asm/genapic.h> | ||
37 | |||
38 | #include "cpu.h" | ||
39 | |||
40 | /* We need valid kernel segments for data and code in long mode too | ||
41 | * IRET will check the segment types kkeil 2000/10/28 | ||
42 | * Also sysret mandates a special GDT layout | ||
43 | */ | ||
44 | /* The TLS descriptors are currently at a different place compared to i386. | ||
45 | Hopefully nobody expects them at a fixed place (Wine?) */ | ||
46 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | ||
47 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | ||
48 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | ||
49 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | ||
50 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | ||
51 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | ||
52 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | ||
53 | } }; | ||
54 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | ||
55 | |||
56 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | ||
57 | |||
58 | /* Current gdt points %fs at the "master" per-cpu area: after this, | ||
59 | * it's on the real one. */ | ||
60 | void switch_to_new_gdt(void) | ||
61 | { | ||
62 | struct desc_ptr gdt_descr; | ||
63 | |||
64 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | ||
65 | gdt_descr.size = GDT_SIZE - 1; | ||
66 | load_gdt(&gdt_descr); | ||
67 | } | ||
68 | |||
69 | struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | ||
70 | |||
71 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | ||
72 | { | ||
73 | display_cacheinfo(c); | ||
74 | } | ||
75 | |||
76 | static struct cpu_dev __cpuinitdata default_cpu = { | ||
77 | .c_init = default_init, | ||
78 | .c_vendor = "Unknown", | ||
79 | }; | ||
80 | static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
81 | |||
82 | int __cpuinit get_model_name(struct cpuinfo_x86 *c) | ||
83 | { | ||
84 | unsigned int *v; | ||
85 | |||
86 | if (c->extended_cpuid_level < 0x80000004) | ||
87 | return 0; | ||
88 | |||
89 | v = (unsigned int *) c->x86_model_id; | ||
90 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | ||
91 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | ||
92 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | ||
93 | c->x86_model_id[48] = 0; | ||
94 | return 1; | ||
95 | } | ||
96 | |||
97 | |||
98 | void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) | ||
99 | { | ||
100 | unsigned int n, dummy, ebx, ecx, edx; | ||
101 | |||
102 | n = c->extended_cpuid_level; | ||
103 | |||
104 | if (n >= 0x80000005) { | ||
105 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | ||
106 | printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), " | ||
107 | "D cache %dK (%d bytes/line)\n", | ||
108 | edx>>24, edx&0xFF, ecx>>24, ecx&0xFF); | ||
109 | c->x86_cache_size = (ecx>>24) + (edx>>24); | ||
110 | /* On K8 L1 TLB is inclusive, so don't count it */ | ||
111 | c->x86_tlbsize = 0; | ||
112 | } | ||
113 | |||
114 | if (n >= 0x80000006) { | ||
115 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); | ||
116 | ecx = cpuid_ecx(0x80000006); | ||
117 | c->x86_cache_size = ecx >> 16; | ||
118 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | ||
119 | |||
120 | printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", | ||
121 | c->x86_cache_size, ecx & 0xFF); | ||
122 | } | ||
123 | } | ||
124 | |||
125 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | ||
126 | { | ||
127 | #ifdef CONFIG_SMP | ||
128 | u32 eax, ebx, ecx, edx; | ||
129 | int index_msb, core_bits; | ||
130 | |||
131 | cpuid(1, &eax, &ebx, &ecx, &edx); | ||
132 | |||
133 | |||
134 | if (!cpu_has(c, X86_FEATURE_HT)) | ||
135 | return; | ||
136 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) | ||
137 | goto out; | ||
138 | |||
139 | smp_num_siblings = (ebx & 0xff0000) >> 16; | ||
140 | |||
141 | if (smp_num_siblings == 1) { | ||
142 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | ||
143 | } else if (smp_num_siblings > 1) { | ||
144 | |||
145 | if (smp_num_siblings > NR_CPUS) { | ||
146 | printk(KERN_WARNING "CPU: Unsupported number of " | ||
147 | "siblings %d", smp_num_siblings); | ||
148 | smp_num_siblings = 1; | ||
149 | return; | ||
150 | } | ||
151 | |||
152 | index_msb = get_count_order(smp_num_siblings); | ||
153 | c->phys_proc_id = phys_pkg_id(index_msb); | ||
154 | |||
155 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | ||
156 | |||
157 | index_msb = get_count_order(smp_num_siblings); | ||
158 | |||
159 | core_bits = get_count_order(c->x86_max_cores); | ||
160 | |||
161 | c->cpu_core_id = phys_pkg_id(index_msb) & | ||
162 | ((1 << core_bits) - 1); | ||
163 | } | ||
164 | out: | ||
165 | if ((c->x86_max_cores * smp_num_siblings) > 1) { | ||
166 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | ||
167 | c->phys_proc_id); | ||
168 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | ||
169 | c->cpu_core_id); | ||
170 | } | ||
171 | |||
172 | #endif | ||
173 | } | ||
174 | |||
175 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | ||
176 | { | ||
177 | char *v = c->x86_vendor_id; | ||
178 | int i; | ||
179 | static int printed; | ||
180 | |||
181 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
182 | if (cpu_devs[i]) { | ||
183 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | ||
184 | (cpu_devs[i]->c_ident[1] && | ||
185 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | ||
186 | c->x86_vendor = i; | ||
187 | this_cpu = cpu_devs[i]; | ||
188 | return; | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | if (!printed) { | ||
193 | printed++; | ||
194 | printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n"); | ||
195 | printk(KERN_ERR "CPU: Your system may be unstable.\n"); | ||
196 | } | ||
197 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
198 | } | ||
199 | |||
200 | static void __init early_cpu_support_print(void) | ||
201 | { | ||
202 | int i,j; | ||
203 | struct cpu_dev *cpu_devx; | ||
204 | |||
205 | printk("KERNEL supported cpus:\n"); | ||
206 | for (i = 0; i < X86_VENDOR_NUM; i++) { | ||
207 | cpu_devx = cpu_devs[i]; | ||
208 | if (!cpu_devx) | ||
209 | continue; | ||
210 | for (j = 0; j < 2; j++) { | ||
211 | if (!cpu_devx->c_ident[j]) | ||
212 | continue; | ||
213 | printk(" %s %s\n", cpu_devx->c_vendor, | ||
214 | cpu_devx->c_ident[j]); | ||
215 | } | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * The NOPL instruction is supposed to exist on all CPUs with | ||
221 | * family >= 6, unfortunately, that's not true in practice because | ||
222 | * of early VIA chips and (more importantly) broken virtualizers that | ||
223 | * are not easy to detect. Hence, probe for it based on first | ||
224 | * principles. | ||
225 | * | ||
226 | * Note: no 64-bit chip is known to lack these, but put the code here | ||
227 | * for consistency with 32 bits, and to make it utterly trivial to | ||
228 | * diagnose the problem should it ever surface. | ||
229 | */ | ||
230 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
231 | { | ||
232 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
233 | u32 has_nopl = nopl_signature; | ||
234 | |||
235 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
236 | if (c->x86 >= 6) { | ||
237 | asm volatile("\n" | ||
238 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
239 | "2:\n" | ||
240 | " .section .fixup,\"ax\"\n" | ||
241 | "3: xor %0,%0\n" | ||
242 | " jmp 2b\n" | ||
243 | " .previous\n" | ||
244 | _ASM_EXTABLE(1b,3b) | ||
245 | : "+a" (has_nopl)); | ||
246 | |||
247 | if (has_nopl == nopl_signature) | ||
248 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
249 | } | ||
250 | } | ||
251 | |||
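The detect_nopl() probe above (in the common_64.c copy being removed here) executes a NOPL instruction under an exception-table fixup and only sets the feature bit if the instruction did not fault. The same idea can be tried from userspace with a SIGILL handler; a rough, x86-only sketch:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf probe_env;

static void on_sigill(int sig)
{
    (void)sig;
    siglongjmp(probe_env, 1);
}

int main(void)
{
    signal(SIGILL, on_sigill);
    if (sigsetjmp(probe_env, 1) == 0) {
        /* 0f 1f c0 is "nopl %eax", the instruction probed for above */
        __asm__ volatile(".byte 0x0f, 0x1f, 0xc0");
        puts("NOPL executed fine");
    } else {
        puts("NOPL raised SIGILL");
    }
    return 0;
}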
252 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); | ||
253 | |||
254 | void __init early_cpu_init(void) | ||
255 | { | ||
256 | struct cpu_vendor_dev *cvdev; | ||
257 | |||
258 | for (cvdev = __x86cpuvendor_start ; | ||
259 | cvdev < __x86cpuvendor_end ; | ||
260 | cvdev++) | ||
261 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; | ||
262 | early_cpu_support_print(); | ||
263 | early_identify_cpu(&boot_cpu_data); | ||
264 | } | ||
265 | |||
266 | /* Do some early cpuid on the boot CPU to get some parameter that are | ||
267 | needed before check_bugs. Everything advanced is in identify_cpu | ||
268 | below. */ | ||
269 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | ||
270 | { | ||
271 | u32 tfms, xlvl; | ||
272 | |||
273 | c->loops_per_jiffy = loops_per_jiffy; | ||
274 | c->x86_cache_size = -1; | ||
275 | c->x86_vendor = X86_VENDOR_UNKNOWN; | ||
276 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | ||
277 | c->x86_vendor_id[0] = '\0'; /* Unset */ | ||
278 | c->x86_model_id[0] = '\0'; /* Unset */ | ||
279 | c->x86_clflush_size = 64; | ||
280 | c->x86_cache_alignment = c->x86_clflush_size; | ||
281 | c->x86_max_cores = 1; | ||
282 | c->x86_coreid_bits = 0; | ||
283 | c->extended_cpuid_level = 0; | ||
284 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | ||
285 | |||
286 | /* Get vendor name */ | ||
287 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | ||
288 | (unsigned int *)&c->x86_vendor_id[0], | ||
289 | (unsigned int *)&c->x86_vendor_id[8], | ||
290 | (unsigned int *)&c->x86_vendor_id[4]); | ||
291 | |||
292 | get_cpu_vendor(c); | ||
293 | |||
294 | /* Initialize the standard set of capabilities */ | ||
295 | /* Note that the vendor-specific code below might override */ | ||
296 | |||
297 | /* Intel-defined flags: level 0x00000001 */ | ||
298 | if (c->cpuid_level >= 0x00000001) { | ||
299 | __u32 misc; | ||
300 | cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4], | ||
301 | &c->x86_capability[0]); | ||
302 | c->x86 = (tfms >> 8) & 0xf; | ||
303 | c->x86_model = (tfms >> 4) & 0xf; | ||
304 | c->x86_mask = tfms & 0xf; | ||
305 | if (c->x86 == 0xf) | ||
306 | c->x86 += (tfms >> 20) & 0xff; | ||
307 | if (c->x86 >= 0x6) | ||
308 | c->x86_model += ((tfms >> 16) & 0xF) << 4; | ||
309 | if (test_cpu_cap(c, X86_FEATURE_CLFLSH)) | ||
310 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | ||
311 | } else { | ||
312 | /* Have CPUID level 0 only - unheard of */ | ||
313 | c->x86 = 4; | ||
314 | } | ||
315 | |||
316 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff; | ||
317 | #ifdef CONFIG_SMP | ||
318 | c->phys_proc_id = c->initial_apicid; | ||
319 | #endif | ||
320 | /* AMD-defined flags: level 0x80000001 */ | ||
321 | xlvl = cpuid_eax(0x80000000); | ||
322 | c->extended_cpuid_level = xlvl; | ||
323 | if ((xlvl & 0xffff0000) == 0x80000000) { | ||
324 | if (xlvl >= 0x80000001) { | ||
325 | c->x86_capability[1] = cpuid_edx(0x80000001); | ||
326 | c->x86_capability[6] = cpuid_ecx(0x80000001); | ||
327 | } | ||
328 | if (xlvl >= 0x80000004) | ||
329 | get_model_name(c); /* Default name */ | ||
330 | } | ||
331 | |||
332 | /* Transmeta-defined flags: level 0x80860001 */ | ||
333 | xlvl = cpuid_eax(0x80860000); | ||
334 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
335 | /* Don't set x86_cpuid_level here for now to not confuse. */ | ||
336 | if (xlvl >= 0x80860001) | ||
337 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
338 | } | ||
339 | |||
340 | if (c->extended_cpuid_level >= 0x80000007) | ||
341 | c->x86_power = cpuid_edx(0x80000007); | ||
342 | |||
343 | if (c->extended_cpuid_level >= 0x80000008) { | ||
344 | u32 eax = cpuid_eax(0x80000008); | ||
345 | |||
346 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
347 | c->x86_phys_bits = eax & 0xff; | ||
348 | } | ||
349 | |||
350 | detect_nopl(c); | ||
351 | |||
352 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | ||
353 | cpu_devs[c->x86_vendor]->c_early_init) | ||
354 | cpu_devs[c->x86_vendor]->c_early_init(c); | ||
355 | |||
356 | validate_pat_support(c); | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * This does the hard work of actually picking apart the CPU stuff... | ||
361 | */ | ||
362 | static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | ||
363 | { | ||
364 | int i; | ||
365 | |||
366 | early_identify_cpu(c); | ||
367 | |||
368 | init_scattered_cpuid_features(c); | ||
369 | |||
370 | c->apicid = phys_pkg_id(0); | ||
371 | |||
372 | /* | ||
373 | * Vendor-specific initialization. In this section we | ||
374 | * canonicalize the feature flags, meaning if there are | ||
375 | * features a certain CPU supports which CPUID doesn't | ||
376 | * tell us, CPUID claiming incorrect flags, or other bugs, | ||
377 | * we handle them here. | ||
378 | * | ||
379 | * At the end of this section, c->x86_capability better | ||
380 | * indicate the features this CPU genuinely supports! | ||
381 | */ | ||
382 | if (this_cpu->c_init) | ||
383 | this_cpu->c_init(c); | ||
384 | |||
385 | detect_ht(c); | ||
386 | |||
387 | /* | ||
388 | * On SMP, boot_cpu_data holds the common feature set between | ||
389 | * all CPUs; so make sure that we indicate which features are | ||
390 | * common between the CPUs. The first time this routine gets | ||
391 | * executed, c == &boot_cpu_data. | ||
392 | */ | ||
393 | if (c != &boot_cpu_data) { | ||
394 | /* AND the already accumulated flags with these */ | ||
395 | for (i = 0; i < NCAPINTS; i++) | ||
396 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | ||
397 | } | ||
398 | |||
399 | /* Clear all flags overriden by options */ | ||
400 | for (i = 0; i < NCAPINTS; i++) | ||
401 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; | ||
402 | |||
403 | #ifdef CONFIG_X86_MCE | ||
404 | mcheck_init(c); | ||
405 | #endif | ||
406 | select_idle_routine(c); | ||
407 | |||
408 | #ifdef CONFIG_NUMA | ||
409 | numa_add_cpu(smp_processor_id()); | ||
410 | #endif | ||
411 | |||
412 | } | ||
413 | |||
414 | void __cpuinit identify_boot_cpu(void) | ||
415 | { | ||
416 | identify_cpu(&boot_cpu_data); | ||
417 | } | ||
418 | |||
419 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | ||
420 | { | ||
421 | BUG_ON(c == &boot_cpu_data); | ||
422 | identify_cpu(c); | ||
423 | mtrr_ap_init(); | ||
424 | } | ||
425 | |||
426 | static __init int setup_noclflush(char *arg) | ||
427 | { | ||
428 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | ||
429 | return 1; | ||
430 | } | ||
431 | __setup("noclflush", setup_noclflush); | ||
432 | |||
433 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | ||
434 | { | ||
435 | if (c->x86_model_id[0]) | ||
436 | printk(KERN_CONT "%s", c->x86_model_id); | ||
437 | |||
438 | if (c->x86_mask || c->cpuid_level >= 0) | ||
439 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | ||
440 | else | ||
441 | printk(KERN_CONT "\n"); | ||
442 | } | ||
443 | |||
444 | static __init int setup_disablecpuid(char *arg) | ||
445 | { | ||
446 | int bit; | ||
447 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) | ||
448 | setup_clear_cpu_cap(bit); | ||
449 | else | ||
450 | return 0; | ||
451 | return 1; | ||
452 | } | ||
453 | __setup("clearcpuid=", setup_disablecpuid); | ||
454 | |||
455 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | ||
456 | |||
457 | struct x8664_pda **_cpu_pda __read_mostly; | ||
458 | EXPORT_SYMBOL(_cpu_pda); | ||
459 | |||
460 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | ||
461 | |||
462 | char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; | ||
463 | |||
464 | unsigned long __supported_pte_mask __read_mostly = ~0UL; | ||
465 | EXPORT_SYMBOL_GPL(__supported_pte_mask); | ||
466 | |||
467 | static int do_not_nx __cpuinitdata; | ||
468 | |||
469 | /* noexec=on|off | ||
470 | Control non executable mappings for 64bit processes. | ||
471 | |||
472 | on Enable(default) | ||
473 | off Disable | ||
474 | */ | ||
475 | static int __init nonx_setup(char *str) | ||
476 | { | ||
477 | if (!str) | ||
478 | return -EINVAL; | ||
479 | if (!strncmp(str, "on", 2)) { | ||
480 | __supported_pte_mask |= _PAGE_NX; | ||
481 | do_not_nx = 0; | ||
482 | } else if (!strncmp(str, "off", 3)) { | ||
483 | do_not_nx = 1; | ||
484 | __supported_pte_mask &= ~_PAGE_NX; | ||
485 | } | ||
486 | return 0; | ||
487 | } | ||
488 | early_param("noexec", nonx_setup); | ||
489 | |||
490 | int force_personality32; | ||
491 | |||
492 | /* noexec32=on|off | ||
493 | Control non executable heap for 32bit processes. | ||
494 | To control the stack too use noexec=off | ||
495 | |||
496 | on PROT_READ does not imply PROT_EXEC for 32bit processes (default) | ||
497 | off PROT_READ implies PROT_EXEC | ||
498 | */ | ||
499 | static int __init nonx32_setup(char *str) | ||
500 | { | ||
501 | if (!strcmp(str, "on")) | ||
502 | force_personality32 &= ~READ_IMPLIES_EXEC; | ||
503 | else if (!strcmp(str, "off")) | ||
504 | force_personality32 |= READ_IMPLIES_EXEC; | ||
505 | return 1; | ||
506 | } | ||
507 | __setup("noexec32=", nonx32_setup); | ||
508 | |||
509 | void pda_init(int cpu) | ||
510 | { | ||
511 | struct x8664_pda *pda = cpu_pda(cpu); | ||
512 | |||
513 | /* Setup up data that may be needed in __get_free_pages early */ | ||
514 | loadsegment(fs, 0); | ||
515 | loadsegment(gs, 0); | ||
516 | /* Memory clobbers used to order PDA accessed */ | ||
517 | mb(); | ||
518 | wrmsrl(MSR_GS_BASE, pda); | ||
519 | mb(); | ||
520 | |||
521 | pda->cpunumber = cpu; | ||
522 | pda->irqcount = -1; | ||
523 | pda->kernelstack = (unsigned long)stack_thread_info() - | ||
524 | PDA_STACKOFFSET + THREAD_SIZE; | ||
525 | pda->active_mm = &init_mm; | ||
526 | pda->mmu_state = 0; | ||
527 | |||
528 | if (cpu == 0) { | ||
529 | /* others are initialized in smpboot.c */ | ||
530 | pda->pcurrent = &init_task; | ||
531 | pda->irqstackptr = boot_cpu_stack; | ||
532 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
533 | } else { | ||
534 | if (!pda->irqstackptr) { | ||
535 | pda->irqstackptr = (char *) | ||
536 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | ||
537 | if (!pda->irqstackptr) | ||
538 | panic("cannot allocate irqstack for cpu %d", | ||
539 | cpu); | ||
540 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
541 | } | ||
542 | |||
543 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | ||
544 | pda->nodenumber = cpu_to_node(cpu); | ||
545 | } | ||
546 | } | ||
547 | |||
548 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | ||
549 | DEBUG_STKSZ] __page_aligned_bss; | ||
550 | |||
551 | extern asmlinkage void ignore_sysret(void); | ||
552 | |||
553 | /* May not be marked __init: used by software suspend */ | ||
554 | void syscall_init(void) | ||
555 | { | ||
556 | /* | ||
557 | * LSTAR and STAR live in a bit strange symbiosis. | ||
558 | * They both write to the same internal register. STAR allows to | ||
559 | * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. | ||
560 | */ | ||
561 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | ||
562 | wrmsrl(MSR_LSTAR, system_call); | ||
563 | wrmsrl(MSR_CSTAR, ignore_sysret); | ||
564 | |||
565 | #ifdef CONFIG_IA32_EMULATION | ||
566 | syscall32_cpu_init(); | ||
567 | #endif | ||
568 | |||
569 | /* Flags to clear on syscall */ | ||
570 | wrmsrl(MSR_SYSCALL_MASK, | ||
571 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); | ||
572 | } | ||
573 | |||
574 | void __cpuinit check_efer(void) | ||
575 | { | ||
576 | unsigned long efer; | ||
577 | |||
578 | rdmsrl(MSR_EFER, efer); | ||
579 | if (!(efer & EFER_NX) || do_not_nx) | ||
580 | __supported_pte_mask &= ~_PAGE_NX; | ||
581 | } | ||
582 | |||
583 | unsigned long kernel_eflags; | ||
584 | |||
585 | /* | ||
586 | * Copies of the original ist values from the tss are only accessed during | ||
587 | * debugging; no special alignment is required. | ||
588 | */ | ||
589 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | ||
590 | |||
591 | /* | ||
592 | * cpu_init() initializes state that is per-CPU. Some data is already | ||
593 | * initialized (naturally) in the bootstrap process, such as the GDT | ||
594 | * and IDT. We reload them nevertheless; this function acts as a | ||
595 | * 'CPU state barrier', and nothing should get across. | ||
596 | * A lot of state is already set up in PDA init. | ||
597 | */ | ||
598 | void __cpuinit cpu_init(void) | ||
599 | { | ||
600 | int cpu = stack_smp_processor_id(); | ||
601 | struct tss_struct *t = &per_cpu(init_tss, cpu); | ||
602 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); | ||
603 | unsigned long v; | ||
604 | char *estacks = NULL; | ||
605 | struct task_struct *me; | ||
606 | int i; | ||
607 | |||
608 | /* CPU 0 is initialised in head64.c */ | ||
609 | if (cpu != 0) | ||
610 | pda_init(cpu); | ||
611 | else | ||
612 | estacks = boot_exception_stacks; | ||
613 | |||
614 | me = current; | ||
615 | |||
616 | if (cpu_test_and_set(cpu, cpu_initialized)) | ||
617 | panic("CPU#%d already initialized!\n", cpu); | ||
618 | |||
619 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | ||
620 | |||
621 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | ||
622 | |||
623 | /* | ||
624 | * Initialize the per-CPU GDT with the boot GDT, | ||
625 | * and set up the GDT descriptor: | ||
626 | */ | ||
627 | |||
628 | switch_to_new_gdt(); | ||
629 | load_idt((const struct desc_ptr *)&idt_descr); | ||
630 | |||
631 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | ||
632 | syscall_init(); | ||
633 | |||
634 | wrmsrl(MSR_FS_BASE, 0); | ||
635 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | ||
636 | barrier(); | ||
637 | |||
638 | check_efer(); | ||
639 | |||
640 | /* | ||
641 | * set up and load the per-CPU TSS | ||
642 | */ | ||
643 | if (!orig_ist->ist[0]) { | ||
644 | static const unsigned int order[N_EXCEPTION_STACKS] = { | ||
645 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | ||
646 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | ||
647 | }; | ||
648 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | ||
649 | if (cpu) { | ||
650 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
651 | if (!estacks) | ||
652 | panic("Cannot allocate exception " | ||
653 | "stack %ld %d\n", v, cpu); | ||
654 | } | ||
655 | estacks += PAGE_SIZE << order[v]; | ||
656 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
657 | (unsigned long)estacks; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | ||
662 | /* | ||
663 | * <= is required because the CPU will access up to | ||
664 | * 8 bits beyond the end of the IO permission bitmap. | ||
665 | */ | ||
666 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | ||
667 | t->io_bitmap[i] = ~0UL; | ||
668 | |||
669 | atomic_inc(&init_mm.mm_count); | ||
670 | me->active_mm = &init_mm; | ||
671 | if (me->mm) | ||
672 | BUG(); | ||
673 | enter_lazy_tlb(&init_mm, me); | ||
674 | |||
675 | load_sp0(t, ¤t->thread); | ||
676 | set_tss_desc(cpu, t); | ||
677 | load_TR_desc(); | ||
678 | load_LDT(&init_mm.context); | ||
679 | |||
680 | #ifdef CONFIG_KGDB | ||
681 | /* | ||
682 | * If the kgdb is connected no debug regs should be altered. This | ||
683 | * is only applicable when KGDB and a KGDB I/O module are built | ||
684 | * into the kernel and you are using early debugging with | ||
685 | * kgdbwait. KGDB will control the kernel HW breakpoint registers. | ||
686 | */ | ||
687 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | ||
688 | arch_kgdb_ops.correct_hw_break(); | ||
689 | else { | ||
690 | #endif | ||
691 | /* | ||
692 | * Clear all 6 debug registers: | ||
693 | */ | ||
694 | |||
695 | set_debugreg(0UL, 0); | ||
696 | set_debugreg(0UL, 1); | ||
697 | set_debugreg(0UL, 2); | ||
698 | set_debugreg(0UL, 3); | ||
699 | set_debugreg(0UL, 6); | ||
700 | set_debugreg(0UL, 7); | ||
701 | #ifdef CONFIG_KGDB | ||
702 | /* If the kgdb is connected no debug regs should be altered. */ | ||
703 | } | ||
704 | #endif | ||
705 | |||
706 | fpu_init(); | ||
707 | |||
708 | raw_local_save_flags(kernel_eflags); | ||
709 | |||
710 | if (is_uv_system()) | ||
711 | uv_cpu_init(); | ||
712 | } | ||
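One detail worth spelling out from cpu_init() above: the io_bitmap loop runs to '<= IO_BITMAP_LONGS' on purpose, so one extra all-ones long sits past the nominal end of the bitmap and the CPU's over-read beyond it always sees "deny". A standalone sketch of the same shape, with the usual x86-64 sizes filled in only for illustration:

	#define SK_IO_BITMAP_BITS   65536
	#define SK_IO_BITMAP_LONGS  (SK_IO_BITMAP_BITS / 64)    /* 1024 */

	unsigned long bitmap[SK_IO_BITMAP_LONGS + 1];
	int i;

	/* 1025 longs written; the last one is the terminator the CPU may read. */
	for (i = 0; i <= SK_IO_BITMAP_LONGS; i++)
		bitmap[i] = ~0UL;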
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 4d894e8565fe..de4094a39210 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -21,23 +21,16 @@ struct cpu_dev { | |||
21 | void (*c_init)(struct cpuinfo_x86 * c); | 21 | void (*c_init)(struct cpuinfo_x86 * c); |
22 | void (*c_identify)(struct cpuinfo_x86 * c); | 22 | void (*c_identify)(struct cpuinfo_x86 * c); |
23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); | 23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); |
24 | int c_x86_vendor; | ||
24 | }; | 25 | }; |
25 | 26 | ||
26 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; | 27 | #define cpu_dev_register(cpu_devX) \ |
28 | static struct cpu_dev *__cpu_dev_##cpu_devX __used \ | ||
29 | __attribute__((__section__(".x86_cpu_dev.init"))) = \ | ||
30 | &cpu_devX; | ||
27 | 31 | ||
28 | struct cpu_vendor_dev { | 32 | extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[]; |
29 | int vendor; | ||
30 | struct cpu_dev *cpu_dev; | ||
31 | }; | ||
32 | |||
33 | #define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \ | ||
34 | static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \ | ||
35 | __attribute__((__section__(".x86cpuvendor.init"))) = \ | ||
36 | { cpu_vendor_id, cpu_dev } | ||
37 | |||
38 | extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[]; | ||
39 | 33 | ||
40 | extern int get_model_name(struct cpuinfo_x86 *c); | ||
41 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 34 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
42 | 35 | ||
43 | #endif | 36 | #endif |
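The new registration scheme above replaces the explicit vendor/driver pairs with a flat pointer array assembled by the linker: each cpu_dev_register() use drops a struct cpu_dev pointer into the .x86_cpu_dev.init section, bounded by __x86_cpu_dev_start and __x86_cpu_dev_end. A minimal sketch of how such a section array is walked at boot (assuming a cpu_devs[] table indexed by c_x86_vendor, as in the old scheme; the real consumer lives in common.c):

	struct cpu_dev **p;

	for (p = __x86_cpu_dev_start; p < __x86_cpu_dev_end; p++) {
		struct cpu_dev *cpudev = *p;

		if (cpudev->c_x86_vendor >= X86_VENDOR_NUM)
			continue;                       /* ignore bogus ids */
		cpu_devs[cpudev->c_x86_vendor] = cpudev;
	}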
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index dd097b835839..c24c4a487b7c 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -256,7 +256,8 @@ static u32 get_cur_val(const cpumask_t *mask) | |||
256 | * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and | 256 | * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and |
257 | * no meaning should be associated with absolute values of these MSRs. | 257 | * no meaning should be associated with absolute values of these MSRs. |
258 | */ | 258 | */ |
259 | static unsigned int get_measured_perf(unsigned int cpu) | 259 | static unsigned int get_measured_perf(struct cpufreq_policy *policy, |
260 | unsigned int cpu) | ||
260 | { | 261 | { |
261 | union { | 262 | union { |
262 | struct { | 263 | struct { |
@@ -326,7 +327,7 @@ static unsigned int get_measured_perf(unsigned int cpu) | |||
326 | 327 | ||
327 | #endif | 328 | #endif |
328 | 329 | ||
329 | retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100; | 330 | retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100; |
330 | 331 | ||
331 | put_cpu(); | 332 | put_cpu(); |
332 | set_cpus_allowed_ptr(current, &saved_mask); | 333 | set_cpus_allowed_ptr(current, &saved_mask); |
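As the comment before get_measured_perf() says, only the ratio of the APERF/MPERF deltas is architecturally defined. A hedged sketch of the arithmetic performed on those deltas (variable names are illustrative, not the driver's own):

	u64 delta_aperf = aperf_now - aperf_prev;
	u64 delta_mperf = mperf_now - mperf_prev;
	unsigned int measured_khz = 0;

	/* observed average frequency ~= max_freq * delta_aperf / delta_mperf */
	if (delta_mperf)
		measured_khz = div64_u64((u64)max_freq_khz * delta_aperf,
					 delta_mperf);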
@@ -785,7 +786,11 @@ static int __init acpi_cpufreq_init(void) | |||
785 | if (ret) | 786 | if (ret) |
786 | return ret; | 787 | return ret; |
787 | 788 | ||
788 | return cpufreq_register_driver(&acpi_cpufreq_driver); | 789 | ret = cpufreq_register_driver(&acpi_cpufreq_driver); |
790 | if (ret) | ||
791 | free_percpu(acpi_perf_data); | ||
792 | |||
793 | return ret; | ||
789 | } | 794 | } |
790 | 795 | ||
791 | static void __exit acpi_cpufreq_exit(void) | 796 | static void __exit acpi_cpufreq_exit(void) |
@@ -795,8 +800,6 @@ static void __exit acpi_cpufreq_exit(void) | |||
795 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | 800 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
796 | 801 | ||
797 | free_percpu(acpi_perf_data); | 802 | free_percpu(acpi_perf_data); |
798 | |||
799 | return; | ||
800 | } | 803 | } |
801 | 804 | ||
802 | module_param(acpi_pstate_strict, uint, 0644); | 805 | module_param(acpi_pstate_strict, uint, 0644); |
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index e4a4bf870e94..fe613c93b366 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c | |||
@@ -25,8 +25,8 @@ | |||
25 | #include <linux/cpufreq.h> | 25 | #include <linux/cpufreq.h> |
26 | 26 | ||
27 | #include <asm/msr.h> | 27 | #include <asm/msr.h> |
28 | #include <asm/timex.h> | 28 | #include <linux/timex.h> |
29 | #include <asm/io.h> | 29 | #include <linux/io.h> |
30 | 30 | ||
31 | #define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ | 31 | #define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ |
32 | #define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ | 32 | #define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ |
@@ -82,7 +82,7 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) | |||
82 | u8 clockspeed_reg; /* Clock Speed Register */ | 82 | u8 clockspeed_reg; /* Clock Speed Register */ |
83 | 83 | ||
84 | local_irq_disable(); | 84 | local_irq_disable(); |
85 | outb_p(0x80,REG_CSCIR); | 85 | outb_p(0x80, REG_CSCIR); |
86 | clockspeed_reg = inb_p(REG_CSCDR); | 86 | clockspeed_reg = inb_p(REG_CSCDR); |
87 | local_irq_enable(); | 87 | local_irq_enable(); |
88 | 88 | ||
@@ -98,10 +98,10 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) | |||
98 | } | 98 | } |
99 | 99 | ||
100 | /* 33 MHz is not 32 MHz... */ | 100 | /* 33 MHz is not 32 MHz... */ |
101 | if ((clockspeed_reg & 0xE0)==0xA0) | 101 | if ((clockspeed_reg & 0xE0) == 0xA0) |
102 | return 33000; | 102 | return 33000; |
103 | 103 | ||
104 | return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000); | 104 | return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000; |
105 | } | 105 | } |
106 | 106 | ||
107 | 107 | ||
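The decode above maps bits 7:5 of the clock-speed register to a power-of-two number of MHz (returned in kHz), with 0xA0 special-cased to 33 MHz. Worked out for an illustrative register value:

	u8 reg = 0xC0;          /* example value only, not taken from hardware */
	unsigned int khz = (1 << ((reg & 0xE0) >> 5)) * 1000;
	/* (0xC0 & 0xE0) >> 5 == 6, so khz == 64000 */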
@@ -117,7 +117,7 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu) | |||
117 | * There is no return value. | 117 | * There is no return value. |
118 | */ | 118 | */ |
119 | 119 | ||
120 | static void elanfreq_set_cpu_state (unsigned int state) | 120 | static void elanfreq_set_cpu_state(unsigned int state) |
121 | { | 121 | { |
122 | struct cpufreq_freqs freqs; | 122 | struct cpufreq_freqs freqs; |
123 | 123 | ||
@@ -144,20 +144,20 @@ static void elanfreq_set_cpu_state (unsigned int state) | |||
144 | */ | 144 | */ |
145 | 145 | ||
146 | local_irq_disable(); | 146 | local_irq_disable(); |
147 | outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */ | 147 | outb_p(0x40, REG_CSCIR); /* Disable hyperspeed mode */ |
148 | outb_p(0x00,REG_CSCDR); | 148 | outb_p(0x00, REG_CSCDR); |
149 | local_irq_enable(); /* wait till internal pipelines and */ | 149 | local_irq_enable(); /* wait till internal pipelines and */ |
150 | udelay(1000); /* buffers have cleaned up */ | 150 | udelay(1000); /* buffers have cleaned up */ |
151 | 151 | ||
152 | local_irq_disable(); | 152 | local_irq_disable(); |
153 | 153 | ||
154 | /* now, set the CPU clock speed register (0x80) */ | 154 | /* now, set the CPU clock speed register (0x80) */ |
155 | outb_p(0x80,REG_CSCIR); | 155 | outb_p(0x80, REG_CSCIR); |
156 | outb_p(elan_multiplier[state].val80h,REG_CSCDR); | 156 | outb_p(elan_multiplier[state].val80h, REG_CSCDR); |
157 | 157 | ||
158 | /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */ | 158 | /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */ |
159 | outb_p(0x40,REG_CSCIR); | 159 | outb_p(0x40, REG_CSCIR); |
160 | outb_p(elan_multiplier[state].val40h,REG_CSCDR); | 160 | outb_p(elan_multiplier[state].val40h, REG_CSCDR); |
161 | udelay(10000); | 161 | udelay(10000); |
162 | local_irq_enable(); | 162 | local_irq_enable(); |
163 | 163 | ||
@@ -173,12 +173,12 @@ static void elanfreq_set_cpu_state (unsigned int state) | |||
173 | * for the hardware supported by the driver. | 173 | * for the hardware supported by the driver. |
174 | */ | 174 | */ |
175 | 175 | ||
176 | static int elanfreq_verify (struct cpufreq_policy *policy) | 176 | static int elanfreq_verify(struct cpufreq_policy *policy) |
177 | { | 177 | { |
178 | return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); | 178 | return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); |
179 | } | 179 | } |
180 | 180 | ||
181 | static int elanfreq_target (struct cpufreq_policy *policy, | 181 | static int elanfreq_target(struct cpufreq_policy *policy, |
182 | unsigned int target_freq, | 182 | unsigned int target_freq, |
183 | unsigned int relation) | 183 | unsigned int relation) |
184 | { | 184 | { |
@@ -205,7 +205,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
205 | 205 | ||
206 | /* capability check */ | 206 | /* capability check */ |
207 | if ((c->x86_vendor != X86_VENDOR_AMD) || | 207 | if ((c->x86_vendor != X86_VENDOR_AMD) || |
208 | (c->x86 != 4) || (c->x86_model!=10)) | 208 | (c->x86 != 4) || (c->x86_model != 10)) |
209 | return -ENODEV; | 209 | return -ENODEV; |
210 | 210 | ||
211 | /* max freq */ | 211 | /* max freq */ |
@@ -213,7 +213,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
213 | max_freq = elanfreq_get_cpu_frequency(0); | 213 | max_freq = elanfreq_get_cpu_frequency(0); |
214 | 214 | ||
215 | /* table init */ | 215 | /* table init */ |
216 | for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { | 216 | for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { |
217 | if (elanfreq_table[i].frequency > max_freq) | 217 | if (elanfreq_table[i].frequency > max_freq) |
218 | elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; | 218 | elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; |
219 | } | 219 | } |
@@ -224,7 +224,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
224 | 224 | ||
225 | result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); | 225 | result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); |
226 | if (result) | 226 | if (result) |
227 | return (result); | 227 | return result; |
228 | 228 | ||
229 | cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); | 229 | cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); |
230 | return 0; | 230 | return 0; |
@@ -260,7 +260,7 @@ __setup("elanfreq=", elanfreq_setup); | |||
260 | #endif | 260 | #endif |
261 | 261 | ||
262 | 262 | ||
263 | static struct freq_attr* elanfreq_attr[] = { | 263 | static struct freq_attr *elanfreq_attr[] = { |
264 | &cpufreq_freq_attr_scaling_available_freqs, | 264 | &cpufreq_freq_attr_scaling_available_freqs, |
265 | NULL, | 265 | NULL, |
266 | }; | 266 | }; |
@@ -284,9 +284,9 @@ static int __init elanfreq_init(void) | |||
284 | 284 | ||
285 | /* Test if we have the right hardware */ | 285 | /* Test if we have the right hardware */ |
286 | if ((c->x86_vendor != X86_VENDOR_AMD) || | 286 | if ((c->x86_vendor != X86_VENDOR_AMD) || |
287 | (c->x86 != 4) || (c->x86_model!=10)) { | 287 | (c->x86 != 4) || (c->x86_model != 10)) { |
288 | printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); | 288 | printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); |
289 | return -ENODEV; | 289 | return -ENODEV; |
290 | } | 290 | } |
291 | return cpufreq_register_driver(&elanfreq_driver); | 291 | return cpufreq_register_driver(&elanfreq_driver); |
292 | } | 292 | } |
@@ -298,7 +298,7 @@ static void __exit elanfreq_exit(void) | |||
298 | } | 298 | } |
299 | 299 | ||
300 | 300 | ||
301 | module_param (max_freq, int, 0444); | 301 | module_param(max_freq, int, 0444); |
302 | 302 | ||
303 | MODULE_LICENSE("GPL"); | 303 | MODULE_LICENSE("GPL"); |
304 | MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>"); | 304 | MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>"); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index eb9b62b0830c..b5ced806a316 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c | |||
@@ -15,12 +15,11 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | 16 | ||
17 | #include <asm/msr.h> | 17 | #include <asm/msr.h> |
18 | #include <asm/timex.h> | 18 | #include <linux/timex.h> |
19 | #include <asm/io.h> | 19 | #include <linux/io.h> |
20 | 20 | ||
21 | 21 | #define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long | |
22 | #define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long | 22 | as it is unused */ |
23 | as it is unused */ | ||
24 | 23 | ||
25 | static unsigned int busfreq; /* FSB, in 10 kHz */ | 24 | static unsigned int busfreq; /* FSB, in 10 kHz */ |
26 | static unsigned int max_multiplier; | 25 | static unsigned int max_multiplier; |
@@ -53,7 +52,7 @@ static int powernow_k6_get_cpu_multiplier(void) | |||
53 | 52 | ||
54 | msrval = POWERNOW_IOPORT + 0x1; | 53 | msrval = POWERNOW_IOPORT + 0x1; |
55 | wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ | 54 | wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ |
56 | invalue=inl(POWERNOW_IOPORT + 0x8); | 55 | invalue = inl(POWERNOW_IOPORT + 0x8); |
57 | msrval = POWERNOW_IOPORT + 0x0; | 56 | msrval = POWERNOW_IOPORT + 0x0; |
58 | wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ | 57 | wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ |
59 | 58 | ||
@@ -67,9 +66,9 @@ static int powernow_k6_get_cpu_multiplier(void) | |||
67 | * | 66 | * |
68 | * Tries to change the PowerNow! multiplier | 67 | * Tries to change the PowerNow! multiplier |
69 | */ | 68 | */ |
70 | static void powernow_k6_set_state (unsigned int best_i) | 69 | static void powernow_k6_set_state(unsigned int best_i) |
71 | { | 70 | { |
72 | unsigned long outvalue=0, invalue=0; | 71 | unsigned long outvalue = 0, invalue = 0; |
73 | unsigned long msrval; | 72 | unsigned long msrval; |
74 | struct cpufreq_freqs freqs; | 73 | struct cpufreq_freqs freqs; |
75 | 74 | ||
@@ -90,10 +89,10 @@ static void powernow_k6_set_state (unsigned int best_i) | |||
90 | 89 | ||
91 | msrval = POWERNOW_IOPORT + 0x1; | 90 | msrval = POWERNOW_IOPORT + 0x1; |
92 | wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ | 91 | wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ |
93 | invalue=inl(POWERNOW_IOPORT + 0x8); | 92 | invalue = inl(POWERNOW_IOPORT + 0x8); |
94 | invalue = invalue & 0xf; | 93 | invalue = invalue & 0xf; |
95 | outvalue = outvalue | invalue; | 94 | outvalue = outvalue | invalue; |
96 | outl(outvalue ,(POWERNOW_IOPORT + 0x8)); | 95 | outl(outvalue , (POWERNOW_IOPORT + 0x8)); |
97 | msrval = POWERNOW_IOPORT + 0x0; | 96 | msrval = POWERNOW_IOPORT + 0x0; |
98 | wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ | 97 | wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ |
99 | 98 | ||
@@ -124,7 +123,7 @@ static int powernow_k6_verify(struct cpufreq_policy *policy) | |||
124 | * | 123 | * |
125 | * sets a new CPUFreq policy | 124 | * sets a new CPUFreq policy |
126 | */ | 125 | */ |
127 | static int powernow_k6_target (struct cpufreq_policy *policy, | 126 | static int powernow_k6_target(struct cpufreq_policy *policy, |
128 | unsigned int target_freq, | 127 | unsigned int target_freq, |
129 | unsigned int relation) | 128 | unsigned int relation) |
130 | { | 129 | { |
@@ -152,7 +151,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
152 | busfreq = cpu_khz / max_multiplier; | 151 | busfreq = cpu_khz / max_multiplier; |
153 | 152 | ||
154 | /* table init */ | 153 | /* table init */ |
155 | for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { | 154 | for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { |
156 | if (clock_ratio[i].index > max_multiplier) | 155 | if (clock_ratio[i].index > max_multiplier) |
157 | clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; | 156 | clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; |
158 | else | 157 | else |
@@ -165,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
165 | 164 | ||
166 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); | 165 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); |
167 | if (result) | 166 | if (result) |
168 | return (result); | 167 | return result; |
169 | 168 | ||
170 | cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); | 169 | cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); |
171 | 170 | ||
@@ -176,8 +175,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
176 | static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) | 175 | static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) |
177 | { | 176 | { |
178 | unsigned int i; | 177 | unsigned int i; |
179 | for (i=0; i<8; i++) { | 178 | for (i = 0; i < 8; i++) { |
180 | if (i==max_multiplier) | 179 | if (i == max_multiplier) |
181 | powernow_k6_set_state(i); | 180 | powernow_k6_set_state(i); |
182 | } | 181 | } |
183 | cpufreq_frequency_table_put_attr(policy->cpu); | 182 | cpufreq_frequency_table_put_attr(policy->cpu); |
@@ -189,7 +188,7 @@ static unsigned int powernow_k6_get(unsigned int cpu) | |||
189 | return busfreq * powernow_k6_get_cpu_multiplier(); | 188 | return busfreq * powernow_k6_get_cpu_multiplier(); |
190 | } | 189 | } |
191 | 190 | ||
192 | static struct freq_attr* powernow_k6_attr[] = { | 191 | static struct freq_attr *powernow_k6_attr[] = { |
193 | &cpufreq_freq_attr_scaling_available_freqs, | 192 | &cpufreq_freq_attr_scaling_available_freqs, |
194 | NULL, | 193 | NULL, |
195 | }; | 194 | }; |
@@ -227,7 +226,7 @@ static int __init powernow_k6_init(void) | |||
227 | } | 226 | } |
228 | 227 | ||
229 | if (cpufreq_register_driver(&powernow_k6_driver)) { | 228 | if (cpufreq_register_driver(&powernow_k6_driver)) { |
230 | release_region (POWERNOW_IOPORT, 16); | 229 | release_region(POWERNOW_IOPORT, 16); |
231 | return -EINVAL; | 230 | return -EINVAL; |
232 | } | 231 | } |
233 | 232 | ||
@@ -243,13 +242,13 @@ static int __init powernow_k6_init(void) | |||
243 | static void __exit powernow_k6_exit(void) | 242 | static void __exit powernow_k6_exit(void) |
244 | { | 243 | { |
245 | cpufreq_unregister_driver(&powernow_k6_driver); | 244 | cpufreq_unregister_driver(&powernow_k6_driver); |
246 | release_region (POWERNOW_IOPORT, 16); | 245 | release_region(POWERNOW_IOPORT, 16); |
247 | } | 246 | } |
248 | 247 | ||
249 | 248 | ||
250 | MODULE_AUTHOR ("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>"); | 249 | MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>"); |
251 | MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); | 250 | MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); |
252 | MODULE_LICENSE ("GPL"); | 251 | MODULE_LICENSE("GPL"); |
253 | 252 | ||
254 | module_init(powernow_k6_init); | 253 | module_init(powernow_k6_init); |
255 | module_exit(powernow_k6_exit); | 254 | module_exit(powernow_k6_exit); |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 898a5a2002ed..ffd0f5ed071a 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -121,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void) | |||
121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
122 | 122 | ||
123 | /* Load/Store Serialize to mem access disable (=reorder it) */ | 123 | /* Load/Store Serialize to mem access disable (=reorder it) */ |
124 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); | 124 | setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); |
125 | /* set load/store serialize from 1GB to 4GB */ | 125 | /* set load/store serialize from 1GB to 4GB */ |
126 | ccr3 |= 0xe0; | 126 | ccr3 |= 0xe0; |
127 | setCx86(CX86_CCR3, ccr3); | 127 | setCx86(CX86_CCR3, ccr3); |
@@ -132,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void) | |||
132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
133 | 133 | ||
134 | /* CCR2 bit 2: unlock NW bit */ | 134 | /* CCR2 bit 2: unlock NW bit */ |
135 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); | 135 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); |
136 | /* set 'Not Write-through' */ | 136 | /* set 'Not Write-through' */ |
137 | write_cr0(read_cr0() | X86_CR0_NW); | 137 | write_cr0(read_cr0() | X86_CR0_NW); |
138 | /* CCR2 bit 2: lock NW bit and set WT1 */ | 138 | /* CCR2 bit 2: lock NW bit and set WT1 */ |
139 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); | 139 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | 142 | /* |
@@ -150,14 +150,14 @@ static void __cpuinit geode_configure(void) | |||
150 | local_irq_save(flags); | 150 | local_irq_save(flags); |
151 | 151 | ||
152 | /* Suspend on halt power saving and enable #SUSP pin */ | 152 | /* Suspend on halt power saving and enable #SUSP pin */ |
153 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); | 153 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); |
154 | 154 | ||
155 | ccr3 = getCx86(CX86_CCR3); | 155 | ccr3 = getCx86(CX86_CCR3); |
156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
157 | 157 | ||
158 | 158 | ||
159 | /* FPU fast, DTE cache, Mem bypass */ | 159 | /* FPU fast, DTE cache, Mem bypass */ |
160 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); | 160 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); |
161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
162 | 162 | ||
163 | set_cx86_memwb(); | 163 | set_cx86_memwb(); |
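The hunks in this file all follow the same MAPEN bracket: the MAPEN field in CCR3 is switched on with (ccr3 & 0x0f) | 0x10 so the extended Cyrix configuration registers respond to setCx86()/setCx86_old(), and the saved CCR3 value is written back afterwards to switch it off again. A condensed sketch of that pattern (the CCR4 poke is only an example):

	unsigned long flags;
	u8 ccr3;

	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */

	/* ... poke extended registers while MAPEN is on, e.g.: */
	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);

	setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
	local_irq_restore(flags);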
@@ -291,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
291 | /* GXm supports extended cpuid levels 'ala' AMD */ | 291 | /* GXm supports extended cpuid levels 'ala' AMD */ |
292 | if (c->cpuid_level == 2) { | 292 | if (c->cpuid_level == 2) { |
293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ | 293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ |
294 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); | 294 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); |
295 | 295 | ||
296 | /* | 296 | /* |
297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 | 297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 |
@@ -301,7 +301,6 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
301 | */ | 301 | */ |
302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) | 302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) |
303 | geode_configure(); | 303 | geode_configure(); |
304 | get_model_name(c); /* get CPU marketing name */ | ||
305 | return; | 304 | return; |
306 | } else { /* MediaGX */ | 305 | } else { /* MediaGX */ |
307 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; | 306 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; |
@@ -314,7 +313,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
314 | if (dir1 > 7) { | 313 | if (dir1 > 7) { |
315 | dir0_msn++; /* M II */ | 314 | dir0_msn++; /* M II */ |
316 | /* Enable MMX extensions (App note 108) */ | 315 | /* Enable MMX extensions (App note 108) */ |
317 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); | 316 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); |
318 | } else { | 317 | } else { |
319 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ | 318 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ |
320 | } | 319 | } |
@@ -429,7 +428,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
429 | local_irq_save(flags); | 428 | local_irq_save(flags); |
430 | ccr3 = getCx86(CX86_CCR3); | 429 | ccr3 = getCx86(CX86_CCR3); |
431 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 430 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
432 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */ | 431 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ |
433 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 432 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
434 | local_irq_restore(flags); | 433 | local_irq_restore(flags); |
435 | } | 434 | } |
@@ -442,14 +441,16 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | |||
442 | .c_early_init = early_init_cyrix, | 441 | .c_early_init = early_init_cyrix, |
443 | .c_init = init_cyrix, | 442 | .c_init = init_cyrix, |
444 | .c_identify = cyrix_identify, | 443 | .c_identify = cyrix_identify, |
444 | .c_x86_vendor = X86_VENDOR_CYRIX, | ||
445 | }; | 445 | }; |
446 | 446 | ||
447 | cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); | 447 | cpu_dev_register(cyrix_cpu_dev); |
448 | 448 | ||
449 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { | 449 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { |
450 | .c_vendor = "NSC", | 450 | .c_vendor = "NSC", |
451 | .c_ident = { "Geode by NSC" }, | 451 | .c_ident = { "Geode by NSC" }, |
452 | .c_init = init_nsc, | 452 | .c_init = init_nsc, |
453 | .c_x86_vendor = X86_VENDOR_NSC, | ||
453 | }; | 454 | }; |
454 | 455 | ||
455 | cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev); | 456 | cpu_dev_register(nsc_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c deleted file mode 100644 index c9017799497c..000000000000 --- a/arch/x86/kernel/cpu/feature_names.c +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | /* | ||
2 | * Strings for the various x86 capability flags. | ||
3 | * | ||
4 | * This file must not contain any executable code. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cpufeature.h> | ||
8 | |||
9 | /* | ||
10 | * These flag bits must match the definitions in <asm/cpufeature.h>. | ||
11 | * NULL means this bit is undefined or reserved; either way it doesn't | ||
12 | * have meaning as far as Linux is concerned. Note that it's important | ||
13 | * to realize there is a difference between this table and CPUID -- if | ||
14 | * applications want to get the raw CPUID data, they should access | ||
15 | * /dev/cpu/<cpu_nr>/cpuid instead. | ||
16 | */ | ||
17 | const char * const x86_cap_flags[NCAPINTS*32] = { | ||
18 | /* Intel-defined */ | ||
19 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", | ||
20 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", | ||
21 | "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", | ||
22 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", | ||
23 | |||
24 | /* AMD-defined */ | ||
25 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
26 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | ||
27 | NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, | ||
28 | NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", | ||
29 | "3dnowext", "3dnow", | ||
30 | |||
31 | /* Transmeta-defined */ | ||
32 | "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, | ||
33 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
34 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
35 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
36 | |||
37 | /* Other (Linux-defined) */ | ||
38 | "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", | ||
39 | NULL, NULL, NULL, NULL, | ||
40 | "constant_tsc", "up", NULL, "arch_perfmon", | ||
41 | "pebs", "bts", NULL, NULL, | ||
42 | "rep_good", NULL, NULL, NULL, | ||
43 | "nopl", NULL, NULL, NULL, | ||
44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
45 | |||
46 | /* Intel-defined (#2) */ | ||
47 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", | ||
48 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, | ||
49 | NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", | ||
50 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
51 | |||
52 | /* VIA/Cyrix/Centaur-defined */ | ||
53 | NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", | ||
54 | "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, | ||
55 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
56 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
57 | |||
58 | /* AMD-defined (#2) */ | ||
59 | "lahf_lm", "cmp_legacy", "svm", "extapic", | ||
60 | "cr8_legacy", "abm", "sse4a", "misalignsse", | ||
61 | "3dnowprefetch", "osvw", "ibs", "sse5", | ||
62 | "skinit", "wdt", NULL, NULL, | ||
63 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
64 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
65 | |||
66 | /* Auxiliary (Linux-defined) */ | ||
67 | "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
68 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
69 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
70 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
71 | }; | ||
72 | |||
73 | const char *const x86_power_flags[32] = { | ||
74 | "ts", /* temperature sensor */ | ||
75 | "fid", /* frequency id control */ | ||
76 | "vid", /* voltage id control */ | ||
77 | "ttp", /* thermal trip */ | ||
78 | "tm", | ||
79 | "stc", | ||
80 | "100mhzsteps", | ||
81 | "hwpstate", | ||
82 | "", /* tsc invariant mapped to constant_tsc */ | ||
83 | /* nothing */ | ||
84 | }; | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b75f2569b8f8..99468dbd08da 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -15,6 +15,11 @@ | |||
15 | #include <asm/ds.h> | 15 | #include <asm/ds.h> |
16 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
17 | 17 | ||
18 | #ifdef CONFIG_X86_64 | ||
19 | #include <asm/topology.h> | ||
20 | #include <asm/numa_64.h> | ||
21 | #endif | ||
22 | |||
18 | #include "cpu.h" | 23 | #include "cpu.h" |
19 | 24 | ||
20 | #ifdef CONFIG_X86_LOCAL_APIC | 25 | #ifdef CONFIG_X86_LOCAL_APIC |
@@ -23,23 +28,22 @@ | |||
23 | #include <mach_apic.h> | 28 | #include <mach_apic.h> |
24 | #endif | 29 | #endif |
25 | 30 | ||
26 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
27 | /* | ||
28 | * Alignment at which movsl is preferred for bulk memory copies. | ||
29 | */ | ||
30 | struct movsl_mask movsl_mask __read_mostly; | ||
31 | #endif | ||
32 | |||
33 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 31 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
34 | { | 32 | { |
35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | ||
36 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | ||
37 | c->x86_cache_alignment = 128; | ||
38 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | 33 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || |
39 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | 34 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) |
40 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 35 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
36 | |||
37 | #ifdef CONFIG_X86_64 | ||
38 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); | ||
39 | #else | ||
40 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | ||
41 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | ||
42 | c->x86_cache_alignment = 128; | ||
43 | #endif | ||
41 | } | 44 | } |
42 | 45 | ||
46 | #ifdef CONFIG_X86_32 | ||
43 | /* | 47 | /* |
44 | * Early probe support logic for ppro memory erratum #50 | 48 | * Early probe support logic for ppro memory erratum #50 |
45 | * | 49 | * |
@@ -59,15 +63,54 @@ int __cpuinit ppro_with_ram_bug(void) | |||
59 | return 0; | 63 | return 0; |
60 | } | 64 | } |
61 | 65 | ||
66 | #ifdef CONFIG_X86_F00F_BUG | ||
67 | static void __cpuinit trap_init_f00f_bug(void) | ||
68 | { | ||
69 | __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); | ||
62 | 70 | ||
63 | /* | 71 | /* |
64 | * P4 Xeon errata 037 workaround. | 72 | * Update the IDT descriptor and reload the IDT so that |
65 | * Hardware prefetcher may cause stale data to be loaded into the cache. | 73 | * it uses the read-only mapped virtual address. |
66 | */ | 74 | */ |
67 | static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | 75 | idt_descr.address = fix_to_virt(FIX_F00F_IDT); |
76 | load_idt(&idt_descr); | ||
77 | } | ||
78 | #endif | ||
79 | |||
80 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | ||
68 | { | 81 | { |
69 | unsigned long lo, hi; | 82 | unsigned long lo, hi; |
70 | 83 | ||
84 | #ifdef CONFIG_X86_F00F_BUG | ||
85 | /* | ||
86 | * All current models of Pentium and Pentium with MMX technology CPUs | ||
87 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | ||
88 | * Note that the workaround only should be initialized once... | ||
89 | */ | ||
90 | c->f00f_bug = 0; | ||
91 | if (!paravirt_enabled() && c->x86 == 5) { | ||
92 | static int f00f_workaround_enabled; | ||
93 | |||
94 | c->f00f_bug = 1; | ||
95 | if (!f00f_workaround_enabled) { | ||
96 | trap_init_f00f_bug(); | ||
97 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | ||
98 | f00f_workaround_enabled = 1; | ||
99 | } | ||
100 | } | ||
101 | #endif | ||
102 | |||
103 | /* | ||
104 | * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until | ||
105 | * model 3 mask 3 | ||
106 | */ | ||
107 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | ||
108 | clear_cpu_cap(c, X86_FEATURE_SEP); | ||
109 | |||
110 | /* | ||
111 | * P4 Xeon errata 037 workaround. | ||
112 | * Hardware prefetcher may cause stale data to be loaded into the cache. | ||
113 | */ | ||
71 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | 114 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
72 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); | 115 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
73 | if ((lo & (1<<9)) == 0) { | 116 | if ((lo & (1<<9)) == 0) { |
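The SEP check in the hunk above packs family, model and stepping into one comparable value, (x86 << 8) | (x86_model << 4) | x86_mask, and compares it against the "model 3 mask 3" cutoff. Worked out with illustrative signatures:

	/* Family 6, model 1, stepping 9:
	 *   (6 << 8) | (1 << 4) | 9 == 0x619  -> below 0x633, SEP is cleared.
	 * Family 6, model 3, stepping 3:
	 *   (6 << 8) | (3 << 4) | 3 == 0x633  -> not below, SEP is kept. */
	unsigned int sig = (6 << 8) | (1 << 4) | 9;     /* 0x619 */
	int sep_usable = !(sig < 0x633);                /* 0 for this example */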
@@ -77,13 +120,68 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | |||
77 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 120 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); |
78 | } | 121 | } |
79 | } | 122 | } |
123 | |||
124 | /* | ||
125 | * See if we have a good local APIC by checking for buggy Pentia, | ||
126 | * i.e. all B steppings and the C2 stepping of P54C when using their | ||
127 | * integrated APIC (see 11AP erratum in "Pentium Processor | ||
128 | * Specification Update"). | ||
129 | */ | ||
130 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && | ||
131 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) | ||
132 | set_cpu_cap(c, X86_FEATURE_11AP); | ||
133 | |||
134 | |||
135 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
136 | /* | ||
137 | * Set up the preferred alignment for movsl bulk memory moves | ||
138 | */ | ||
139 | switch (c->x86) { | ||
140 | case 4: /* 486: untested */ | ||
141 | break; | ||
142 | case 5: /* Old Pentia: untested */ | ||
143 | break; | ||
144 | case 6: /* PII/PIII only like movsl with 8-byte alignment */ | ||
145 | movsl_mask.mask = 7; | ||
146 | break; | ||
147 | case 15: /* P4 is OK down to 8-byte alignment */ | ||
148 | movsl_mask.mask = 7; | ||
149 | break; | ||
150 | } | ||
151 | #endif | ||
152 | |||
153 | #ifdef CONFIG_X86_NUMAQ | ||
154 | numaq_tsc_disable(); | ||
155 | #endif | ||
80 | } | 156 | } |
157 | #else | ||
158 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | ||
159 | { | ||
160 | } | ||
161 | #endif | ||
81 | 162 | ||
163 | static void __cpuinit srat_detect_node(void) | ||
164 | { | ||
165 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | ||
166 | unsigned node; | ||
167 | int cpu = smp_processor_id(); | ||
168 | int apicid = hard_smp_processor_id(); | ||
169 | |||
170 | /* Don't do the funky fallback heuristics the AMD version employs | ||
171 | for now. */ | ||
172 | node = apicid_to_node[apicid]; | ||
173 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
174 | node = first_node(node_online_map); | ||
175 | numa_set_node(cpu, node); | ||
176 | |||
177 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
178 | #endif | ||
179 | } | ||
82 | 180 | ||
83 | /* | 181 | /* |
84 | * find out the number of processor cores on the die | 182 | * find out the number of processor cores on the die |
85 | */ | 183 | */ |
86 | static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) | 184 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) |
87 | { | 185 | { |
88 | unsigned int eax, ebx, ecx, edx; | 186 | unsigned int eax, ebx, ecx, edx; |
89 | 187 | ||
@@ -98,45 +196,51 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) | |||
98 | return 1; | 196 | return 1; |
99 | } | 197 | } |
100 | 198 | ||
101 | #ifdef CONFIG_X86_F00F_BUG | 199 | static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) |
102 | static void __cpuinit trap_init_f00f_bug(void) | ||
103 | { | 200 | { |
104 | __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); | 201 | /* Intel VMX MSR indicated features */ |
105 | 202 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 | |
106 | /* | 203 | #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 |
107 | * Update the IDT descriptor and reload the IDT so that | 204 | #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 |
108 | * it uses the read-only mapped virtual address. | 205 | #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 |
109 | */ | 206 | #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 |
110 | idt_descr.address = fix_to_virt(FIX_F00F_IDT); | 207 | #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 |
111 | load_idt(&idt_descr); | 208 | |
209 | u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; | ||
210 | |||
211 | clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); | ||
212 | clear_cpu_cap(c, X86_FEATURE_VNMI); | ||
213 | clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); | ||
214 | clear_cpu_cap(c, X86_FEATURE_EPT); | ||
215 | clear_cpu_cap(c, X86_FEATURE_VPID); | ||
216 | |||
217 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); | ||
218 | msr_ctl = vmx_msr_high | vmx_msr_low; | ||
219 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) | ||
220 | set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); | ||
221 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) | ||
222 | set_cpu_cap(c, X86_FEATURE_VNMI); | ||
223 | if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { | ||
224 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | ||
225 | vmx_msr_low, vmx_msr_high); | ||
226 | msr_ctl2 = vmx_msr_high | vmx_msr_low; | ||
227 | if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && | ||
228 | (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) | ||
229 | set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); | ||
230 | if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) | ||
231 | set_cpu_cap(c, X86_FEATURE_EPT); | ||
232 | if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) | ||
233 | set_cpu_cap(c, X86_FEATURE_VPID); | ||
234 | } | ||
112 | } | 235 | } |
113 | #endif | ||
114 | 236 | ||
115 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | 237 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) |
116 | { | 238 | { |
117 | unsigned int l2 = 0; | 239 | unsigned int l2 = 0; |
118 | char *p = NULL; | ||
119 | 240 | ||
120 | early_init_intel(c); | 241 | early_init_intel(c); |
121 | 242 | ||
122 | #ifdef CONFIG_X86_F00F_BUG | 243 | intel_workarounds(c); |
123 | /* | ||
124 | * All current models of Pentium and Pentium with MMX technology CPUs | ||
125 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | ||
126 | * Note that the workaround only should be initialized once... | ||
127 | */ | ||
128 | c->f00f_bug = 0; | ||
129 | if (!paravirt_enabled() && c->x86 == 5) { | ||
130 | static int f00f_workaround_enabled; | ||
131 | |||
132 | c->f00f_bug = 1; | ||
133 | if (!f00f_workaround_enabled) { | ||
134 | trap_init_f00f_bug(); | ||
135 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | ||
136 | f00f_workaround_enabled = 1; | ||
137 | } | ||
138 | } | ||
139 | #endif | ||
140 | 244 | ||
141 | l2 = init_intel_cacheinfo(c); | 245 | l2 = init_intel_cacheinfo(c); |
142 | if (c->cpuid_level > 9) { | 246 | if (c->cpuid_level > 9) { |
@@ -146,16 +250,32 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
146 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | 250 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
147 | } | 251 | } |
148 | 252 | ||
149 | /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ | 253 | if (cpu_has_xmm2) |
150 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | 254 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
151 | clear_cpu_cap(c, X86_FEATURE_SEP); | 255 | if (cpu_has_ds) { |
256 | unsigned int l1; | ||
257 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
258 | if (!(l1 & (1<<11))) | ||
259 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
260 | if (!(l1 & (1<<12))) | ||
261 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
262 | ds_init_intel(c); | ||
263 | } | ||
152 | 264 | ||
265 | #ifdef CONFIG_X86_64 | ||
266 | if (c->x86 == 15) | ||
267 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
268 | if (c->x86 == 6) | ||
269 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
270 | #else | ||
153 | /* | 271 | /* |
154 | * Names for the Pentium II/Celeron processors | 272 | * Names for the Pentium II/Celeron processors |
155 | * detectable only by also checking the cache size. | 273 | * detectable only by also checking the cache size. |
156 | * Dixon is NOT a Celeron. | 274 | * Dixon is NOT a Celeron. |
157 | */ | 275 | */ |
158 | if (c->x86 == 6) { | 276 | if (c->x86 == 6) { |
277 | char *p = NULL; | ||
278 | |||
159 | switch (c->x86_model) { | 279 | switch (c->x86_model) { |
160 | case 5: | 280 | case 5: |
161 | if (c->x86_mask == 0) { | 281 | if (c->x86_mask == 0) { |
@@ -178,70 +298,41 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
178 | p = "Celeron (Coppermine)"; | 298 | p = "Celeron (Coppermine)"; |
179 | break; | 299 | break; |
180 | } | 300 | } |
181 | } | ||
182 | |||
183 | if (p) | ||
184 | strcpy(c->x86_model_id, p); | ||
185 | |||
186 | c->x86_max_cores = num_cpu_cores(c); | ||
187 | |||
188 | detect_ht(c); | ||
189 | 301 | ||
190 | /* Work around errata */ | 302 | if (p) |
191 | Intel_errata_workarounds(c); | 303 | strcpy(c->x86_model_id, p); |
192 | |||
193 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
194 | /* | ||
195 | * Set up the preferred alignment for movsl bulk memory moves | ||
196 | */ | ||
197 | switch (c->x86) { | ||
198 | case 4: /* 486: untested */ | ||
199 | break; | ||
200 | case 5: /* Old Pentia: untested */ | ||
201 | break; | ||
202 | case 6: /* PII/PIII only like movsl with 8-byte alignment */ | ||
203 | movsl_mask.mask = 7; | ||
204 | break; | ||
205 | case 15: /* P4 is OK down to 8-byte alignment */ | ||
206 | movsl_mask.mask = 7; | ||
207 | break; | ||
208 | } | 304 | } |
209 | #endif | ||
210 | 305 | ||
211 | if (cpu_has_xmm2) | 306 | if (c->x86 == 15) |
212 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
213 | if (c->x86 == 15) { | ||
214 | set_cpu_cap(c, X86_FEATURE_P4); | 307 | set_cpu_cap(c, X86_FEATURE_P4); |
215 | } | ||
216 | if (c->x86 == 6) | 308 | if (c->x86 == 6) |
217 | set_cpu_cap(c, X86_FEATURE_P3); | 309 | set_cpu_cap(c, X86_FEATURE_P3); |
218 | if (cpu_has_ds) { | ||
219 | unsigned int l1; | ||
220 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
221 | if (!(l1 & (1<<11))) | ||
222 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
223 | if (!(l1 & (1<<12))) | ||
224 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
225 | } | ||
226 | 310 | ||
227 | if (cpu_has_bts) | 311 | if (cpu_has_bts) |
228 | ds_init_intel(c); | 312 | ptrace_bts_init_intel(c); |
229 | 313 | ||
230 | /* | 314 | #endif |
231 | * See if we have a good local APIC by checking for buggy Pentia, | ||
232 | * i.e. all B steppings and the C2 stepping of P54C when using their | ||
233 | * integrated APIC (see 11AP erratum in "Pentium Processor | ||
234 | * Specification Update"). | ||
235 | */ | ||
236 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && | ||
237 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) | ||
238 | set_cpu_cap(c, X86_FEATURE_11AP); | ||
239 | 315 | ||
240 | #ifdef CONFIG_X86_NUMAQ | 316 | detect_extended_topology(c); |
241 | numaq_tsc_disable(); | 317 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { |
318 | /* | ||
319 | * let's use the legacy cpuid vector 0x1 and 0x4 for topology | ||
320 | * detection. | ||
321 | */ | ||
322 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
323 | #ifdef CONFIG_X86_32 | ||
324 | detect_ht(c); | ||
242 | #endif | 325 | #endif |
326 | } | ||
327 | |||
328 | /* Work around errata */ | ||
329 | srat_detect_node(); | ||
330 | |||
331 | if (cpu_has(c, X86_FEATURE_VMX)) | ||
332 | detect_vmx_virtcap(c); | ||
243 | } | 333 | } |
244 | 334 | ||
335 | #ifdef CONFIG_X86_32 | ||
245 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 336 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
246 | { | 337 | { |
247 | /* | 338 | /* |
@@ -254,10 +345,12 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i | |||
254 | size = 256; | 345 | size = 256; |
255 | return size; | 346 | return size; |
256 | } | 347 | } |
348 | #endif | ||
257 | 349 | ||
258 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | 350 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { |
259 | .c_vendor = "Intel", | 351 | .c_vendor = "Intel", |
260 | .c_ident = { "GenuineIntel" }, | 352 | .c_ident = { "GenuineIntel" }, |
353 | #ifdef CONFIG_X86_32 | ||
261 | .c_models = { | 354 | .c_models = { |
262 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = | 355 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = |
263 | { | 356 | { |
@@ -307,76 +400,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
307 | } | 400 | } |
308 | }, | 401 | }, |
309 | }, | 402 | }, |
403 | .c_size_cache = intel_size_cache, | ||
404 | #endif | ||
310 | .c_early_init = early_init_intel, | 405 | .c_early_init = early_init_intel, |
311 | .c_init = init_intel, | 406 | .c_init = init_intel, |
312 | .c_size_cache = intel_size_cache, | 407 | .c_x86_vendor = X86_VENDOR_INTEL, |
313 | }; | 408 | }; |
314 | 409 | ||
315 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | 410 | cpu_dev_register(intel_cpu_dev); |
316 | |||
317 | #ifndef CONFIG_X86_CMPXCHG | ||
318 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
319 | { | ||
320 | u8 prev; | ||
321 | unsigned long flags; | ||
322 | |||
323 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
324 | local_irq_save(flags); | ||
325 | prev = *(u8 *)ptr; | ||
326 | if (prev == old) | ||
327 | *(u8 *)ptr = new; | ||
328 | local_irq_restore(flags); | ||
329 | return prev; | ||
330 | } | ||
331 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
332 | |||
333 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
334 | { | ||
335 | u16 prev; | ||
336 | unsigned long flags; | ||
337 | |||
338 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
339 | local_irq_save(flags); | ||
340 | prev = *(u16 *)ptr; | ||
341 | if (prev == old) | ||
342 | *(u16 *)ptr = new; | ||
343 | local_irq_restore(flags); | ||
344 | return prev; | ||
345 | } | ||
346 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
347 | |||
348 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
349 | { | ||
350 | u32 prev; | ||
351 | unsigned long flags; | ||
352 | |||
353 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
354 | local_irq_save(flags); | ||
355 | prev = *(u32 *)ptr; | ||
356 | if (prev == old) | ||
357 | *(u32 *)ptr = new; | ||
358 | local_irq_restore(flags); | ||
359 | return prev; | ||
360 | } | ||
361 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
362 | #endif | ||
363 | |||
364 | #ifndef CONFIG_X86_CMPXCHG64 | ||
365 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
366 | { | ||
367 | u64 prev; | ||
368 | unsigned long flags; | ||
369 | |||
370 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
371 | local_irq_save(flags); | ||
372 | prev = *(u64 *)ptr; | ||
373 | if (prev == old) | ||
374 | *(u64 *)ptr = new; | ||
375 | local_irq_restore(flags); | ||
376 | return prev; | ||
377 | } | ||
378 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
379 | #endif | ||
380 | |||
381 | /* arch_initcall(intel_cpu_init); */ | ||
382 | 411 | ||
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c deleted file mode 100644 index 1019c58d39f0..000000000000 --- a/arch/x86/kernel/cpu/intel_64.c +++ /dev/null | |||
@@ -1,95 +0,0 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/smp.h> | ||
3 | #include <asm/processor.h> | ||
4 | #include <asm/ptrace.h> | ||
5 | #include <asm/topology.h> | ||
6 | #include <asm/numa_64.h> | ||
7 | |||
8 | #include "cpu.h" | ||
9 | |||
10 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | ||
11 | { | ||
12 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | ||
13 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | ||
14 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
15 | |||
16 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); | ||
17 | } | ||
18 | |||
19 | /* | ||
20 | * find out the number of processor cores on the die | ||
21 | */ | ||
22 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | ||
23 | { | ||
24 | unsigned int eax, t; | ||
25 | |||
26 | if (c->cpuid_level < 4) | ||
27 | return 1; | ||
28 | |||
29 | cpuid_count(4, 0, &eax, &t, &t, &t); | ||
30 | |||
31 | if (eax & 0x1f) | ||
32 | return ((eax >> 26) + 1); | ||
33 | else | ||
34 | return 1; | ||
35 | } | ||
36 | |||
37 | static void __cpuinit srat_detect_node(void) | ||
38 | { | ||
39 | #ifdef CONFIG_NUMA | ||
40 | unsigned node; | ||
41 | int cpu = smp_processor_id(); | ||
42 | int apicid = hard_smp_processor_id(); | ||
43 | |||
44 | /* Don't do the funky fallback heuristics the AMD version employs | ||
45 | for now. */ | ||
46 | node = apicid_to_node[apicid]; | ||
47 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
48 | node = first_node(node_online_map); | ||
49 | numa_set_node(cpu, node); | ||
50 | |||
51 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
52 | #endif | ||
53 | } | ||
54 | |||
55 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | ||
56 | { | ||
57 | init_intel_cacheinfo(c); | ||
58 | if (c->cpuid_level > 9) { | ||
59 | unsigned eax = cpuid_eax(10); | ||
60 | /* Check for version and the number of counters */ | ||
61 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | ||
62 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | ||
63 | } | ||
64 | |||
65 | if (cpu_has_ds) { | ||
66 | unsigned int l1, l2; | ||
67 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
68 | if (!(l1 & (1<<11))) | ||
69 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
70 | if (!(l1 & (1<<12))) | ||
71 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
72 | } | ||
73 | |||
74 | |||
75 | if (cpu_has_bts) | ||
76 | ds_init_intel(c); | ||
77 | |||
78 | if (c->x86 == 15) | ||
79 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
80 | if (c->x86 == 6) | ||
81 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
82 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
83 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
84 | |||
85 | srat_detect_node(); | ||
86 | } | ||
87 | |||
88 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | ||
89 | .c_vendor = "Intel", | ||
90 | .c_ident = { "GenuineIntel" }, | ||
91 | .c_early_init = early_init_intel, | ||
92 | .c_init = init_intel, | ||
93 | }; | ||
94 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | ||
95 | |||
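Both the surviving intel.c copy and the intel_64.c copy deleted above compute the core count from CPUID leaf 4: when the low five bits of EAX report a valid cache type, bits 31:26 hold the maximum number of addressable core IDs in the package, minus one. A worked example with an illustrative raw value:

	unsigned int eax = 0x1c004143;          /* illustrative CPUID.4 EAX value */
	unsigned int cores = 1;

	if (eax & 0x1f)                         /* cache type is not "null"       */
		cores = (eax >> 26) + 1;        /* (0x1c004143 >> 26) + 1 == 8    */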
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6b0a10b002f1..3f46afbb1cf1 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Routines to indentify caches on Intel CPU. | 2 | * Routines to indentify caches on Intel CPU. |
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/pci.h> | ||
16 | 17 | ||
17 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
@@ -130,9 +131,18 @@ struct _cpuid4_info { | |||
130 | union _cpuid4_leaf_ebx ebx; | 131 | union _cpuid4_leaf_ebx ebx; |
131 | union _cpuid4_leaf_ecx ecx; | 132 | union _cpuid4_leaf_ecx ecx; |
132 | unsigned long size; | 133 | unsigned long size; |
134 | unsigned long can_disable; | ||
133 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ | 135 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ |
134 | }; | 136 | }; |
135 | 137 | ||
138 | #ifdef CONFIG_PCI | ||
139 | static struct pci_device_id k8_nb_id[] = { | ||
140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | ||
141 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | ||
142 | {} | ||
143 | }; | ||
144 | #endif | ||
145 | |||
136 | unsigned short num_cache_leaves; | 146 | unsigned short num_cache_leaves; |
137 | 147 | ||
138 | /* AMD doesn't have CPUID4. Emulate it here to report the same | 148 | /* AMD doesn't have CPUID4. Emulate it here to report the same |
@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = { | |||
182 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; | 192 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; |
183 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; | 193 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; |
184 | 194 | ||
185 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 195 | static void __cpuinit |
186 | union _cpuid4_leaf_ebx *ebx, | 196 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
187 | union _cpuid4_leaf_ecx *ecx) | 197 | union _cpuid4_leaf_ebx *ebx, |
198 | union _cpuid4_leaf_ecx *ecx) | ||
188 | { | 199 | { |
189 | unsigned dummy; | 200 | unsigned dummy; |
190 | unsigned line_size, lines_per_tag, assoc, size_in_kb; | 201 | unsigned line_size, lines_per_tag, assoc, size_in_kb; |
@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
251 | (ebx->split.ways_of_associativity + 1) - 1; | 262 | (ebx->split.ways_of_associativity + 1) - 1; |
252 | } | 263 | } |
253 | 264 | ||
254 | static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 265 | static void __cpuinit |
266 | amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | ||
267 | { | ||
268 | if (index < 3) | ||
269 | return; | ||
270 | this_leaf->can_disable = 1; | ||
271 | } | ||
272 | |||
273 | static int | ||
274 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
255 | { | 275 | { |
256 | union _cpuid4_leaf_eax eax; | 276 | union _cpuid4_leaf_eax eax; |
257 | union _cpuid4_leaf_ebx ebx; | 277 | union _cpuid4_leaf_ebx ebx; |
258 | union _cpuid4_leaf_ecx ecx; | 278 | union _cpuid4_leaf_ecx ecx; |
259 | unsigned edx; | 279 | unsigned edx; |
260 | 280 | ||
261 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | 281 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
262 | amd_cpuid4(index, &eax, &ebx, &ecx); | 282 | amd_cpuid4(index, &eax, &ebx, &ecx); |
263 | else | 283 | if (boot_cpu_data.x86 >= 0x10) |
264 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | 284 | amd_check_l3_disable(index, this_leaf); |
285 | } else { | ||
286 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | ||
287 | } | ||
288 | |||
265 | if (eax.split.type == CACHE_TYPE_NULL) | 289 | if (eax.split.type == CACHE_TYPE_NULL) |
266 | return -EIO; /* better error ? */ | 290 | return -EIO; /* better error ? */ |
267 | 291 | ||
268 | this_leaf->eax = eax; | 292 | this_leaf->eax = eax; |
269 | this_leaf->ebx = ebx; | 293 | this_leaf->ebx = ebx; |
270 | this_leaf->ecx = ecx; | 294 | this_leaf->ecx = ecx; |
271 | this_leaf->size = (ecx.split.number_of_sets + 1) * | 295 | this_leaf->size = (ecx.split.number_of_sets + 1) * |
272 | (ebx.split.coherency_line_size + 1) * | 296 | (ebx.split.coherency_line_size + 1) * |
273 | (ebx.split.physical_line_partition + 1) * | 297 | (ebx.split.physical_line_partition + 1) * |
274 | (ebx.split.ways_of_associativity + 1); | 298 | (ebx.split.ways_of_associativity + 1); |
275 | return 0; | 299 | return 0; |
276 | } | 300 | } |
277 | 301 | ||
@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
453 | 477 | ||
454 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 478 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
455 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 479 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
456 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 480 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
457 | 481 | ||
458 | #ifdef CONFIG_SMP | 482 | #ifdef CONFIG_SMP |
459 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 483 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
490 | 514 | ||
491 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 515 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
492 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { | 516 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { |
493 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 517 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
494 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 518 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); |
495 | } | 519 | } |
496 | } | 520 | } |
@@ -572,7 +596,7 @@ struct _index_kobject { | |||
572 | 596 | ||
573 | /* pointer to array of kobjects for cpuX/cache/indexY */ | 597 | /* pointer to array of kobjects for cpuX/cache/indexY */ |
574 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | 598 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); |
575 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) | 599 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) |
576 | 600 | ||
577 | #define show_one_plus(file_name, object, val) \ | 601 | #define show_one_plus(file_name, object, val) \ |
578 | static ssize_t show_##file_name \ | 602 | static ssize_t show_##file_name \ |
@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) { | |||
637 | } | 661 | } |
638 | } | 662 | } |
639 | 663 | ||
664 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
665 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
666 | |||
667 | #ifdef CONFIG_PCI | ||
668 | static struct pci_dev *get_k8_northbridge(int node) | ||
669 | { | ||
670 | struct pci_dev *dev = NULL; | ||
671 | int i; | ||
672 | |||
673 | for (i = 0; i <= node; i++) { | ||
674 | do { | ||
675 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
676 | if (!dev) | ||
677 | break; | ||
678 | } while (!pci_match_id(&k8_nb_id[0], dev)); | ||
679 | if (!dev) | ||
680 | break; | ||
681 | } | ||
682 | return dev; | ||
683 | } | ||
684 | #else | ||
685 | static struct pci_dev *get_k8_northbridge(int node) | ||
686 | { | ||
687 | return NULL; | ||
688 | } | ||
689 | #endif | ||
690 | |||
691 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) | ||
692 | { | ||
693 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
694 | struct pci_dev *dev = NULL; | ||
695 | ssize_t ret = 0; | ||
696 | int i; | ||
697 | |||
698 | if (!this_leaf->can_disable) | ||
699 | return sprintf(buf, "Feature not enabled\n"); | ||
700 | |||
701 | dev = get_k8_northbridge(node); | ||
702 | if (!dev) { | ||
703 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
704 | return -EINVAL; | ||
705 | } | ||
706 | |||
707 | for (i = 0; i < 2; i++) { | ||
708 | unsigned int reg; | ||
709 | |||
710 | pci_read_config_dword(dev, 0x1BC + i * 4, ®); | ||
711 | |||
712 | ret += sprintf(buf, "%sEntry: %d\n", buf, i); | ||
713 | ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n", | ||
714 | buf, | ||
715 | reg & 0x80000000 ? "Disabled" : "Allowed", | ||
716 | reg & 0x40000000 ? "Disabled" : "Allowed"); | ||
717 | ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n", | ||
718 | buf, (reg & 0x30000) >> 16, reg & 0xfff); | ||
719 | } | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static ssize_t | ||
724 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, | ||
725 | size_t count) | ||
726 | { | ||
727 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
728 | struct pci_dev *dev = NULL; | ||
729 | unsigned int ret, index, val; | ||
730 | |||
731 | if (!this_leaf->can_disable) | ||
732 | return 0; | ||
733 | |||
734 | if (strlen(buf) > 15) | ||
735 | return -EINVAL; | ||
736 | |||
737 | ret = sscanf(buf, "%x %x", &index, &val); | ||
738 | if (ret != 2) | ||
739 | return -EINVAL; | ||
740 | if (index > 1) | ||
741 | return -EINVAL; | ||
742 | |||
743 | val |= 0xc0000000; | ||
744 | dev = get_k8_northbridge(node); | ||
745 | if (!dev) { | ||
746 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
747 | return -EINVAL; | ||
748 | } | ||
749 | |||
750 | pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000); | ||
751 | wbinvd(); | ||
752 | pci_write_config_dword(dev, 0x1BC + index * 4, val); | ||
753 | |||
754 | return 1; | ||
755 | } | ||
756 | |||
640 | struct _cache_attr { | 757 | struct _cache_attr { |
641 | struct attribute attr; | 758 | struct attribute attr; |
642 | ssize_t (*show)(struct _cpuid4_info *, char *); | 759 | ssize_t (*show)(struct _cpuid4_info *, char *); |
@@ -657,6 +774,8 @@ define_one_ro(size); | |||
657 | define_one_ro(shared_cpu_map); | 774 | define_one_ro(shared_cpu_map); |
658 | define_one_ro(shared_cpu_list); | 775 | define_one_ro(shared_cpu_list); |
659 | 776 | ||
777 | static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable); | ||
778 | |||
660 | static struct attribute * default_attrs[] = { | 779 | static struct attribute * default_attrs[] = { |
661 | &type.attr, | 780 | &type.attr, |
662 | &level.attr, | 781 | &level.attr, |
@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = { | |||
667 | &size.attr, | 786 | &size.attr, |
668 | &shared_cpu_map.attr, | 787 | &shared_cpu_map.attr, |
669 | &shared_cpu_list.attr, | 788 | &shared_cpu_list.attr, |
789 | &cache_disable.attr, | ||
670 | NULL | 790 | NULL |
671 | }; | 791 | }; |
672 | 792 | ||
673 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
674 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
675 | |||
676 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 793 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) |
677 | { | 794 | { |
678 | struct _cache_attr *fattr = to_attr(attr); | 795 | struct _cache_attr *fattr = to_attr(attr); |
@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
682 | ret = fattr->show ? | 799 | ret = fattr->show ? |
683 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 800 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
684 | buf) : | 801 | buf) : |
685 | 0; | 802 | 0; |
686 | return ret; | 803 | return ret; |
687 | } | 804 | } |
688 | 805 | ||
689 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 806 | static ssize_t store(struct kobject * kobj, struct attribute * attr, |
690 | const char * buf, size_t count) | 807 | const char * buf, size_t count) |
691 | { | 808 | { |
692 | return 0; | 809 | struct _cache_attr *fattr = to_attr(attr); |
810 | struct _index_kobject *this_leaf = to_object(kobj); | ||
811 | ssize_t ret; | ||
812 | |||
813 | ret = fattr->store ? | ||
814 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | ||
815 | buf, count) : | ||
816 | 0; | ||
817 | return ret; | ||
693 | } | 818 | } |
694 | 819 | ||
695 | static struct sysfs_ops sysfs_ops = { | 820 | static struct sysfs_ops sysfs_ops = { |
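The new cache_disable attribute exposes the two L3 index-disable slots that sit in the K8/family-0x10 northbridge at config-space offset 0x1BC + 4 * slot. A small sketch of the word layout exactly as show_cache_disable() above decodes it; the struct and field names are descriptive inventions, not taken from any datasheet:

struct l3_disable_slot {
	unsigned int reads_disabled;		/* bit 31 */
	unsigned int new_entries_disabled;	/* bit 30 */
	unsigned int subcache;			/* bits 17:16 */
	unsigned int index;			/* bits 11:0 */
};

/* Decode one 32-bit word read from config offset 0x1BC + 4 * slot. */
static struct l3_disable_slot decode_l3_disable(unsigned int reg)
{
	struct l3_disable_slot s = {
		.reads_disabled		= !!(reg & 0x80000000),
		.new_entries_disabled	= !!(reg & 0x40000000),
		.subcache		= (reg & 0x30000) >> 16,
		.index			= reg & 0xfff,
	};
	return s;
}

Writes go the other way: store_cache_disable() parses "<slot> <value>" from sysfs, forces bits 31:30 on, writes the word with bit 30 still clear, runs wbinvd(), then writes the final value. Assuming the usual cacheinfo layout, the attribute would be driven with something like echo "0 3" > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable on an L3 leaf.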
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 726a5fcdf341..4b031a4ac856 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -860,7 +860,7 @@ error: | |||
860 | return err; | 860 | return err; |
861 | } | 861 | } |
862 | 862 | ||
863 | static void mce_remove_device(unsigned int cpu) | 863 | static __cpuinit void mce_remove_device(unsigned int cpu) |
864 | { | 864 | { |
865 | int i; | 865 | int i; |
866 | 866 | ||
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl new file mode 100644 index 000000000000..dfea390e1608 --- /dev/null +++ b/arch/x86/kernel/cpu/mkcapflags.pl | |||
@@ -0,0 +1,32 @@ | |||
1 | #!/usr/bin/perl | ||
2 | # | ||
3 | # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h | ||
4 | # | ||
5 | |||
6 | ($in, $out) = @ARGV; | ||
7 | |||
8 | open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n"; | ||
9 | open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n"; | ||
10 | |||
11 | print OUT "#include <asm/cpufeature.h>\n\n"; | ||
12 | print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n"; | ||
13 | |||
14 | while (defined($line = <IN>)) { | ||
15 | if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) { | ||
16 | $macro = $1; | ||
17 | $feature = $2; | ||
18 | $tail = $3; | ||
19 | if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) { | ||
20 | $feature = $1; | ||
21 | } | ||
22 | |||
23 | if ($feature ne '') { | ||
24 | printf OUT "\t%-32s = \"%s\",\n", | ||
25 | "[$macro]", "\L$feature"; | ||
26 | } | ||
27 | } | ||
28 | } | ||
29 | print OUT "};\n"; | ||
30 | |||
31 | close(IN); | ||
32 | close(OUT); | ||
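The generated capflags.c is ordinary C: an x86_cap_flags[] array of strings indexed by the X86_FEATURE_* macros, with each name taken from the macro suffix (lower-cased) unless the header comment supplies a quoted override. Assuming cpufeature.h entries along the lines of "#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */" (illustrative, not quoted from the header), the output looks roughly like:

#include <asm/cpufeature.h>

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU]                = "fpu",
	[X86_FEATURE_PGE]                = "pge",
	/* ... one designated initializer per feature macro ... */
};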
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index cb7d3b6a80eb..4e8d77f01eeb 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -401,12 +401,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
401 | tmp |= ~((1<<(hi - 1)) - 1); | 401 | tmp |= ~((1<<(hi - 1)) - 1); |
402 | 402 | ||
403 | if (tmp != mask_lo) { | 403 | if (tmp != mask_lo) { |
404 | static int once = 1; | 404 | WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n"); |
405 | |||
406 | if (once) { | ||
407 | printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n"); | ||
408 | once = 0; | ||
409 | } | ||
410 | mask_lo = tmp; | 405 | mask_lo = tmp; |
411 | } | 406 | } |
412 | } | 407 | } |
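WARN_ONCE() carries its own one-shot latch, so the hand-rolled "static int once" can go; the visible difference is that the first trigger now produces a full WARNING splat with a backtrace rather than a bare KERN_INFO line, which presumably is the point: a fixed-up BIOS mask becomes much harder to miss in a boot log.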
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 84c480bb3715..4c4214690dd1 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -405,9 +405,9 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset) | |||
405 | } | 405 | } |
406 | /* RED-PEN: base can be > 32bit */ | 406 | /* RED-PEN: base can be > 32bit */ |
407 | len += seq_printf(seq, | 407 | len += seq_printf(seq, |
408 | "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", | 408 | "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", |
409 | i, base, base >> (20 - PAGE_SHIFT), size, factor, | 409 | i, base, base >> (20 - PAGE_SHIFT), size, factor, |
410 | mtrr_attrib_to_str(type), mtrr_usage_table[i]); | 410 | mtrr_usage_table[i], mtrr_attrib_to_str(type)); |
411 | } | 411 | } |
412 | } | 412 | } |
413 | return 0; | 413 | return 0; |
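Illustratively (values invented for the example), a 1 GB write-back region at base 0 with one user used to be printed as

	reg00: base=0x00000000 (   0MB), size=1024MB: write-back, count=1

and with the wider fields and reordered tail now comes out as

	reg00: base=0x000000000 (    0MB), size= 1024MB, count=1: write-back

Anything parsing /proc/mtrr positionally needs to cope with the count moving in front of the type string.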
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 885c8265e6b5..c78c04821ea1 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -729,7 +729,7 @@ struct var_mtrr_range_state { | |||
729 | mtrr_type type; | 729 | mtrr_type type; |
730 | }; | 730 | }; |
731 | 731 | ||
732 | struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | 732 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; |
733 | static int __initdata debug_print; | 733 | static int __initdata debug_print; |
734 | 734 | ||
735 | static int __init | 735 | static int __init |
@@ -759,7 +759,8 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
759 | /* take out UC ranges */ | 759 | /* take out UC ranges */ |
760 | for (i = 0; i < num_var_ranges; i++) { | 760 | for (i = 0; i < num_var_ranges; i++) { |
761 | type = range_state[i].type; | 761 | type = range_state[i].type; |
762 | if (type != MTRR_TYPE_UNCACHABLE) | 762 | if (type != MTRR_TYPE_UNCACHABLE && |
763 | type != MTRR_TYPE_WRPROT) | ||
763 | continue; | 764 | continue; |
764 | size = range_state[i].size_pfn; | 765 | size = range_state[i].size_pfn; |
765 | if (!size) | 766 | if (!size) |
@@ -836,6 +837,13 @@ static int __init enable_mtrr_cleanup_setup(char *str) | |||
836 | } | 837 | } |
837 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); | 838 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); |
838 | 839 | ||
840 | static int __init mtrr_cleanup_debug_setup(char *str) | ||
841 | { | ||
842 | debug_print = 1; | ||
843 | return 0; | ||
844 | } | ||
845 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | ||
846 | |||
839 | struct var_mtrr_state { | 847 | struct var_mtrr_state { |
840 | unsigned long range_startk; | 848 | unsigned long range_startk; |
841 | unsigned long range_sizek; | 849 | unsigned long range_sizek; |
@@ -898,6 +906,27 @@ set_var_mtrr_all(unsigned int address_bits) | |||
898 | } | 906 | } |
899 | } | 907 | } |
900 | 908 | ||
909 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | ||
910 | { | ||
911 | char factor; | ||
912 | unsigned long base = sizek; | ||
913 | |||
914 | if (base & ((1<<10) - 1)) { | ||
915 | /* not MB alignment */ | ||
916 | factor = 'K'; | ||
917 | } else if (base & ((1<<20) - 1)){ | ||
918 | factor = 'M'; | ||
919 | base >>= 10; | ||
920 | } else { | ||
921 | factor = 'G'; | ||
922 | base >>= 20; | ||
923 | } | ||
924 | |||
925 | *factorp = factor; | ||
926 | |||
927 | return base; | ||
928 | } | ||
929 | |||
901 | static unsigned int __init | 930 | static unsigned int __init |
902 | range_to_mtrr(unsigned int reg, unsigned long range_startk, | 931 | range_to_mtrr(unsigned int reg, unsigned long range_startk, |
903 | unsigned long range_sizek, unsigned char type) | 932 | unsigned long range_sizek, unsigned char type) |
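to_size_factor() takes a size in KB and picks the largest unit that divides it exactly, so the debug output can say 64K or 2M instead of always rounding to MB. A standalone copy for experimenting outside the kernel (same logic, plus a tiny driver):

#include <stdio.h>

static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
	unsigned long base = sizek;

	if (base & ((1 << 10) - 1)) {		/* not MB aligned: keep KB */
		*factorp = 'K';
	} else if (base & ((1 << 20) - 1)) {	/* not GB aligned: report MB */
		*factorp = 'M';
		base >>= 10;
	} else {				/* report GB */
		*factorp = 'G';
		base >>= 20;
	}
	return base;
}

int main(void)
{
	unsigned long sizes[] = { 512, 1536, 2048, 1048576 };
	char f;

	for (int i = 0; i < 4; i++)
		printf("%lu kB -> %lu%c\n", sizes[i], to_size_factor(sizes[i], &f), f);
	return 0;	/* prints 512K, 1536K, 2M, 1G */
}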
@@ -919,13 +948,21 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
919 | align = max_align; | 948 | align = max_align; |
920 | 949 | ||
921 | sizek = 1 << align; | 950 | sizek = 1 << align; |
922 | if (debug_print) | 951 | if (debug_print) { |
952 | char start_factor = 'K', size_factor = 'K'; | ||
953 | unsigned long start_base, size_base; | ||
954 | |||
955 | start_base = to_size_factor(range_startk, &start_factor), | ||
956 | size_base = to_size_factor(sizek, &size_factor), | ||
957 | |||
923 | printk(KERN_DEBUG "Setting variable MTRR %d, " | 958 | printk(KERN_DEBUG "Setting variable MTRR %d, " |
924 | "base: %ldMB, range: %ldMB, type %s\n", | 959 | "base: %ld%cB, range: %ld%cB, type %s\n", |
925 | reg, range_startk >> 10, sizek >> 10, | 960 | reg, start_base, start_factor, |
961 | size_base, size_factor, | ||
926 | (type == MTRR_TYPE_UNCACHABLE)?"UC": | 962 | (type == MTRR_TYPE_UNCACHABLE)?"UC": |
927 | ((type == MTRR_TYPE_WRBACK)?"WB":"Other") | 963 | ((type == MTRR_TYPE_WRBACK)?"WB":"Other") |
928 | ); | 964 | ); |
965 | } | ||
929 | save_var_mtrr(reg++, range_startk, sizek, type); | 966 | save_var_mtrr(reg++, range_startk, sizek, type); |
930 | range_startk += sizek; | 967 | range_startk += sizek; |
931 | range_sizek -= sizek; | 968 | range_sizek -= sizek; |
@@ -970,6 +1007,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
970 | /* try to append some small hole */ | 1007 | /* try to append some small hole */ |
971 | range0_basek = state->range_startk; | 1008 | range0_basek = state->range_startk; |
972 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | 1009 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); |
1010 | |||
1011 | /* no increase */ | ||
973 | if (range0_sizek == state->range_sizek) { | 1012 | if (range0_sizek == state->range_sizek) { |
974 | if (debug_print) | 1013 | if (debug_print) |
975 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | 1014 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", |
@@ -980,13 +1019,40 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
980 | return 0; | 1019 | return 0; |
981 | } | 1020 | } |
982 | 1021 | ||
983 | range0_sizek -= chunk_sizek; | 1022 | /* only cut back, when it is not the last */ |
984 | if (range0_sizek && sizek) { | 1023 | if (sizek) { |
985 | while (range0_basek + range0_sizek > (basek + sizek)) { | 1024 | while (range0_basek + range0_sizek > (basek + sizek)) { |
986 | range0_sizek -= chunk_sizek; | 1025 | if (range0_sizek >= chunk_sizek) |
987 | if (!range0_sizek) | 1026 | range0_sizek -= chunk_sizek; |
988 | break; | 1027 | else |
989 | } | 1028 | range0_sizek = 0; |
1029 | |||
1030 | if (!range0_sizek) | ||
1031 | break; | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | second_try: | ||
1036 | range_basek = range0_basek + range0_sizek; | ||
1037 | |||
1038 | /* one hole in the middle */ | ||
1039 | if (range_basek > basek && range_basek <= (basek + sizek)) | ||
1040 | second_sizek = range_basek - basek; | ||
1041 | |||
1042 | if (range0_sizek > state->range_sizek) { | ||
1043 | |||
1044 | /* one hole in middle or at end */ | ||
1045 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | ||
1046 | |||
1047 | /* hole size should be less than half of range0 size */ | ||
1048 | if (hole_sizek >= (range0_sizek >> 1) && | ||
1049 | range0_sizek >= chunk_sizek) { | ||
1050 | range0_sizek -= chunk_sizek; | ||
1051 | second_sizek = 0; | ||
1052 | hole_sizek = 0; | ||
1053 | |||
1054 | goto second_try; | ||
1055 | } | ||
990 | } | 1056 | } |
991 | 1057 | ||
992 | if (range0_sizek) { | 1058 | if (range0_sizek) { |
@@ -996,50 +1062,28 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
996 | (range0_basek + range0_sizek)<<10); | 1062 | (range0_basek + range0_sizek)<<10); |
997 | state->reg = range_to_mtrr(state->reg, range0_basek, | 1063 | state->reg = range_to_mtrr(state->reg, range0_basek, |
998 | range0_sizek, MTRR_TYPE_WRBACK); | 1064 | range0_sizek, MTRR_TYPE_WRBACK); |
999 | |||
1000 | } | ||
1001 | |||
1002 | range_basek = range0_basek + range0_sizek; | ||
1003 | range_sizek = chunk_sizek; | ||
1004 | |||
1005 | if (range_basek + range_sizek > basek && | ||
1006 | range_basek + range_sizek <= (basek + sizek)) { | ||
1007 | /* one hole */ | ||
1008 | second_basek = basek; | ||
1009 | second_sizek = range_basek + range_sizek - basek; | ||
1010 | } | 1065 | } |
1011 | 1066 | ||
1012 | /* if last piece, only could one hole near end */ | 1067 | if (range0_sizek < state->range_sizek) { |
1013 | if ((second_basek || !basek) && | 1068 | /* need to handle left over */ |
1014 | range_sizek - (state->range_sizek - range0_sizek) - second_sizek < | ||
1015 | (chunk_sizek >> 1)) { | ||
1016 | /* | ||
1017 | * one hole in middle (second_sizek is 0) or at end | ||
1018 | * (second_sizek is 0 ) | ||
1019 | */ | ||
1020 | hole_sizek = range_sizek - (state->range_sizek - range0_sizek) | ||
1021 | - second_sizek; | ||
1022 | hole_basek = range_basek + range_sizek - hole_sizek | ||
1023 | - second_sizek; | ||
1024 | } else { | ||
1025 | /* fallback for big hole, or several holes */ | ||
1026 | range_sizek = state->range_sizek - range0_sizek; | 1069 | range_sizek = state->range_sizek - range0_sizek; |
1027 | second_basek = 0; | 1070 | |
1028 | second_sizek = 0; | 1071 | if (debug_print) |
1072 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | ||
1073 | range_basek<<10, | ||
1074 | (range_basek + range_sizek)<<10); | ||
1075 | state->reg = range_to_mtrr(state->reg, range_basek, | ||
1076 | range_sizek, MTRR_TYPE_WRBACK); | ||
1029 | } | 1077 | } |
1030 | 1078 | ||
1031 | if (debug_print) | ||
1032 | printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10, | ||
1033 | (range_basek + range_sizek)<<10); | ||
1034 | state->reg = range_to_mtrr(state->reg, range_basek, range_sizek, | ||
1035 | MTRR_TYPE_WRBACK); | ||
1036 | if (hole_sizek) { | 1079 | if (hole_sizek) { |
1080 | hole_basek = range_basek - hole_sizek - second_sizek; | ||
1037 | if (debug_print) | 1081 | if (debug_print) |
1038 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | 1082 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", |
1039 | hole_basek<<10, (hole_basek + hole_sizek)<<10); | 1083 | hole_basek<<10, |
1040 | state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, | 1084 | (hole_basek + hole_sizek)<<10); |
1041 | MTRR_TYPE_UNCACHABLE); | 1085 | state->reg = range_to_mtrr(state->reg, hole_basek, |
1042 | 1086 | hole_sizek, MTRR_TYPE_UNCACHABLE); | |
1043 | } | 1087 | } |
1044 | 1088 | ||
1045 | return second_sizek; | 1089 | return second_sizek; |
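The reworked logic above is easier to reason about than the removed version: range0 is the WB range rounded up to whole chunks, the rounding slack becomes either an overlap into the next range (second_sizek) or a UC hole, and if that hole would be at least half of range0 the code shaves one chunk off range0 and retries. Worked by hand: with chunk_sizek = 64M, a trailing 20M of RAM is no longer described as 64M WB plus 44M UC; range0 collapses to zero on the retry and the 20M is emitted directly as WB by the leftover branch, with no UC hole at all.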
@@ -1154,11 +1198,11 @@ struct mtrr_cleanup_result { | |||
1154 | }; | 1198 | }; |
1155 | 1199 | ||
1156 | /* | 1200 | /* |
1157 | * gran_size: 1M, 2M, ..., 2G | 1201 | * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G |
1158 | * chunk size: gran_size, ..., 4G | 1202 | * chunk size: gran_size, ..., 2G |
1159 | * so we need (2+13)*6 | 1203 | * so we need (1+16)*8 |
1160 | */ | 1204 | */ |
1161 | #define NUM_RESULT 90 | 1205 | #define NUM_RESULT 136 |
1162 | #define PSHIFT (PAGE_SHIFT - 10) | 1206 | #define PSHIFT (PAGE_SHIFT - 10) |
1163 | 1207 | ||
1164 | static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; | 1208 | static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; |
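The new bound follows from the widened search space: gran_size now covers the 16 powers of two from 64K up to 2G, and for each gran_size the chunk_size loop runs from gran_size up to 2G, so the number of (gran, chunk) pairs tried is 16 + 15 + ... + 1 = (1 + 16) * 16 / 2 = 136, which is what the "(1+16)*8" in the comment evaluates to and what NUM_RESULT must hold.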
@@ -1168,13 +1212,14 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | |||
1168 | static int __init mtrr_cleanup(unsigned address_bits) | 1212 | static int __init mtrr_cleanup(unsigned address_bits) |
1169 | { | 1213 | { |
1170 | unsigned long extra_remove_base, extra_remove_size; | 1214 | unsigned long extra_remove_base, extra_remove_size; |
1171 | unsigned long i, base, size, def, dummy; | 1215 | unsigned long base, size, def, dummy; |
1172 | mtrr_type type; | 1216 | mtrr_type type; |
1173 | int nr_range, nr_range_new; | 1217 | int nr_range, nr_range_new; |
1174 | u64 chunk_size, gran_size; | 1218 | u64 chunk_size, gran_size; |
1175 | unsigned long range_sums, range_sums_new; | 1219 | unsigned long range_sums, range_sums_new; |
1176 | int index_good; | 1220 | int index_good; |
1177 | int num_reg_good; | 1221 | int num_reg_good; |
1222 | int i; | ||
1178 | 1223 | ||
1179 | /* extra one for all 0 */ | 1224 | /* extra one for all 0 */ |
1180 | int num[MTRR_NUM_TYPES + 1]; | 1225 | int num[MTRR_NUM_TYPES + 1]; |
@@ -1204,6 +1249,8 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1204 | continue; | 1249 | continue; |
1205 | if (!size) | 1250 | if (!size) |
1206 | type = MTRR_NUM_TYPES; | 1251 | type = MTRR_NUM_TYPES; |
1252 | if (type == MTRR_TYPE_WRPROT) | ||
1253 | type = MTRR_TYPE_UNCACHABLE; | ||
1207 | num[type]++; | 1254 | num[type]++; |
1208 | } | 1255 | } |
1209 | 1256 | ||
@@ -1216,23 +1263,57 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1216 | num_var_ranges - num[MTRR_NUM_TYPES]) | 1263 | num_var_ranges - num[MTRR_NUM_TYPES]) |
1217 | return 0; | 1264 | return 0; |
1218 | 1265 | ||
1266 | /* print original var MTRRs at first, for debugging: */ | ||
1267 | printk(KERN_DEBUG "original variable MTRRs\n"); | ||
1268 | for (i = 0; i < num_var_ranges; i++) { | ||
1269 | char start_factor = 'K', size_factor = 'K'; | ||
1270 | unsigned long start_base, size_base; | ||
1271 | |||
1272 | size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); | ||
1273 | if (!size_base) | ||
1274 | continue; | ||
1275 | |||
1276 | size_base = to_size_factor(size_base, &size_factor), | ||
1277 | start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); | ||
1278 | start_base = to_size_factor(start_base, &start_factor), | ||
1279 | type = range_state[i].type; | ||
1280 | |||
1281 | printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", | ||
1282 | i, start_base, start_factor, | ||
1283 | size_base, size_factor, | ||
1284 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : | ||
1285 | ((type == MTRR_TYPE_WRPROT) ? "WP" : | ||
1286 | ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")) | ||
1287 | ); | ||
1288 | } | ||
1289 | |||
1219 | memset(range, 0, sizeof(range)); | 1290 | memset(range, 0, sizeof(range)); |
1220 | extra_remove_size = 0; | 1291 | extra_remove_size = 0; |
1221 | if (mtrr_tom2) { | 1292 | extra_remove_base = 1 << (32 - PAGE_SHIFT); |
1222 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | 1293 | if (mtrr_tom2) |
1223 | extra_remove_size = | 1294 | extra_remove_size = |
1224 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | 1295 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; |
1225 | } | ||
1226 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | 1296 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, |
1227 | extra_remove_size); | 1297 | extra_remove_size); |
1298 | /* | ||
1299 | * [0, 1M) should always be covered by var mtrr with WB | ||
1300 | * and fixed mtrrs should take effect before var mtrr for it | ||
1301 | */ | ||
1302 | nr_range = add_range_with_merge(range, nr_range, 0, | ||
1303 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | ||
1304 | /* sort the ranges */ | ||
1305 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
1306 | |||
1228 | range_sums = sum_ranges(range, nr_range); | 1307 | range_sums = sum_ranges(range, nr_range); |
1229 | printk(KERN_INFO "total RAM coverred: %ldM\n", | 1308 | printk(KERN_INFO "total RAM coverred: %ldM\n", |
1230 | range_sums >> (20 - PAGE_SHIFT)); | 1309 | range_sums >> (20 - PAGE_SHIFT)); |
1231 | 1310 | ||
1232 | if (mtrr_chunk_size && mtrr_gran_size) { | 1311 | if (mtrr_chunk_size && mtrr_gran_size) { |
1233 | int num_reg; | 1312 | int num_reg; |
1313 | char gran_factor, chunk_factor, lose_factor; | ||
1314 | unsigned long gran_base, chunk_base, lose_base; | ||
1234 | 1315 | ||
1235 | debug_print = 1; | 1316 | debug_print++; |
1236 | /* convert ranges to var ranges state */ | 1317 | /* convert ranges to var ranges state */ |
1237 | num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, | 1318 | num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, |
1238 | mtrr_gran_size); | 1319 | mtrr_gran_size); |
@@ -1256,34 +1337,48 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1256 | result[i].lose_cover_sizek = | 1337 | result[i].lose_cover_sizek = |
1257 | (range_sums - range_sums_new) << PSHIFT; | 1338 | (range_sums - range_sums_new) << PSHIFT; |
1258 | 1339 | ||
1259 | printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", | 1340 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
1260 | result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10, | 1341 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
1261 | result[i].chunk_sizek >> 10); | 1342 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
1262 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n", | 1343 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", |
1344 | result[i].bad?"*BAD*":" ", | ||
1345 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
1346 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | ||
1263 | result[i].num_reg, result[i].bad?"-":"", | 1347 | result[i].num_reg, result[i].bad?"-":"", |
1264 | result[i].lose_cover_sizek >> 10); | 1348 | lose_base, lose_factor); |
1265 | if (!result[i].bad) { | 1349 | if (!result[i].bad) { |
1266 | set_var_mtrr_all(address_bits); | 1350 | set_var_mtrr_all(address_bits); |
1267 | return 1; | 1351 | return 1; |
1268 | } | 1352 | } |
1269 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " | 1353 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " |
1270 | "will find optimal one\n"); | 1354 | "will find optimal one\n"); |
1271 | debug_print = 0; | 1355 | debug_print--; |
1272 | memset(result, 0, sizeof(result[0])); | 1356 | memset(result, 0, sizeof(result[0])); |
1273 | } | 1357 | } |
1274 | 1358 | ||
1275 | i = 0; | 1359 | i = 0; |
1276 | memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); | 1360 | memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); |
1277 | memset(result, 0, sizeof(result)); | 1361 | memset(result, 0, sizeof(result)); |
1278 | for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) { | 1362 | for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { |
1279 | for (chunk_size = gran_size; chunk_size < (1ULL<<33); | 1363 | char gran_factor; |
1364 | unsigned long gran_base; | ||
1365 | |||
1366 | if (debug_print) | ||
1367 | gran_base = to_size_factor(gran_size >> 10, &gran_factor); | ||
1368 | |||
1369 | for (chunk_size = gran_size; chunk_size < (1ULL<<32); | ||
1280 | chunk_size <<= 1) { | 1370 | chunk_size <<= 1) { |
1281 | int num_reg; | 1371 | int num_reg; |
1282 | 1372 | ||
1283 | if (debug_print) | 1373 | if (debug_print) { |
1284 | printk(KERN_INFO | 1374 | char chunk_factor; |
1285 | "\ngran_size: %lldM chunk_size_size: %lldM\n", | 1375 | unsigned long chunk_base; |
1286 | gran_size >> 20, chunk_size >> 20); | 1376 | |
1377 | chunk_base = to_size_factor(chunk_size>>10, &chunk_factor), | ||
1378 | printk(KERN_INFO "\n"); | ||
1379 | printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n", | ||
1380 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
1381 | } | ||
1287 | if (i >= NUM_RESULT) | 1382 | if (i >= NUM_RESULT) |
1288 | continue; | 1383 | continue; |
1289 | 1384 | ||
@@ -1326,12 +1421,18 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1326 | 1421 | ||
1327 | /* print out all */ | 1422 | /* print out all */ |
1328 | for (i = 0; i < NUM_RESULT; i++) { | 1423 | for (i = 0; i < NUM_RESULT; i++) { |
1329 | printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", | 1424 | char gran_factor, chunk_factor, lose_factor; |
1330 | result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10, | 1425 | unsigned long gran_base, chunk_base, lose_base; |
1331 | result[i].chunk_sizek >> 10); | 1426 | |
1332 | printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n", | 1427 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
1333 | result[i].num_reg, result[i].bad?"-":"", | 1428 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
1334 | result[i].lose_cover_sizek >> 10); | 1429 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
1430 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | ||
1431 | result[i].bad?"*BAD*":" ", | ||
1432 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
1433 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | ||
1434 | result[i].num_reg, result[i].bad?"-":"", | ||
1435 | lose_base, lose_factor); | ||
1335 | } | 1436 | } |
1336 | 1437 | ||
1337 | /* try to find the optimal index */ | 1438 | /* try to find the optimal index */ |
@@ -1339,10 +1440,8 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1339 | nr_mtrr_spare_reg = num_var_ranges - 1; | 1440 | nr_mtrr_spare_reg = num_var_ranges - 1; |
1340 | num_reg_good = -1; | 1441 | num_reg_good = -1; |
1341 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | 1442 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { |
1342 | if (!min_loss_pfn[i]) { | 1443 | if (!min_loss_pfn[i]) |
1343 | num_reg_good = i; | 1444 | num_reg_good = i; |
1344 | break; | ||
1345 | } | ||
1346 | } | 1445 | } |
1347 | 1446 | ||
1348 | index_good = -1; | 1447 | index_good = -1; |
@@ -1358,21 +1457,26 @@ static int __init mtrr_cleanup(unsigned address_bits) | |||
1358 | } | 1457 | } |
1359 | 1458 | ||
1360 | if (index_good != -1) { | 1459 | if (index_good != -1) { |
1460 | char gran_factor, chunk_factor, lose_factor; | ||
1461 | unsigned long gran_base, chunk_base, lose_base; | ||
1462 | |||
1361 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); | 1463 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); |
1362 | i = index_good; | 1464 | i = index_good; |
1363 | printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t", | 1465 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
1364 | result[i].gran_sizek >> 10, | 1466 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
1365 | result[i].chunk_sizek >> 10); | 1467 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
1366 | printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n", | 1468 | printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t", |
1367 | result[i].num_reg, | 1469 | gran_base, gran_factor, chunk_base, chunk_factor); |
1368 | result[i].lose_cover_sizek >> 10); | 1470 | printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n", |
1471 | result[i].num_reg, lose_base, lose_factor); | ||
1369 | /* convert ranges to var ranges state */ | 1472 | /* convert ranges to var ranges state */ |
1370 | chunk_size = result[i].chunk_sizek; | 1473 | chunk_size = result[i].chunk_sizek; |
1371 | chunk_size <<= 10; | 1474 | chunk_size <<= 10; |
1372 | gran_size = result[i].gran_sizek; | 1475 | gran_size = result[i].gran_sizek; |
1373 | gran_size <<= 10; | 1476 | gran_size <<= 10; |
1374 | debug_print = 1; | 1477 | debug_print++; |
1375 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); | 1478 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
1479 | debug_print--; | ||
1376 | set_var_mtrr_all(address_bits); | 1480 | set_var_mtrr_all(address_bits); |
1377 | return 1; | 1481 | return 1; |
1378 | } | 1482 | } |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 05cc22dbd4ff..6bff382094f5 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -295,13 +295,19 @@ static int setup_k7_watchdog(unsigned nmi_hz) | |||
295 | /* setup the timer */ | 295 | /* setup the timer */ |
296 | wrmsr(evntsel_msr, evntsel, 0); | 296 | wrmsr(evntsel_msr, evntsel, 0); |
297 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | 297 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); |
298 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
299 | evntsel |= K7_EVNTSEL_ENABLE; | ||
300 | wrmsr(evntsel_msr, evntsel, 0); | ||
301 | 298 | ||
299 | /* initialize the wd struct before enabling */ | ||
302 | wd->perfctr_msr = perfctr_msr; | 300 | wd->perfctr_msr = perfctr_msr; |
303 | wd->evntsel_msr = evntsel_msr; | 301 | wd->evntsel_msr = evntsel_msr; |
304 | wd->cccr_msr = 0; /* unused */ | 302 | wd->cccr_msr = 0; /* unused */ |
303 | |||
304 | /* ok, everything is initialized, announce that we're set */ | ||
305 | cpu_nmi_set_wd_enabled(); | ||
306 | |||
307 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
308 | evntsel |= K7_EVNTSEL_ENABLE; | ||
309 | wrmsr(evntsel_msr, evntsel, 0); | ||
310 | |||
305 | return 1; | 311 | return 1; |
306 | } | 312 | } |
307 | 313 | ||
@@ -379,13 +385,19 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
379 | wrmsr(evntsel_msr, evntsel, 0); | 385 | wrmsr(evntsel_msr, evntsel, 0); |
380 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 386 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
381 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | 387 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); |
382 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
383 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
384 | wrmsr(evntsel_msr, evntsel, 0); | ||
385 | 388 | ||
389 | /* initialize the wd struct before enabling */ | ||
386 | wd->perfctr_msr = perfctr_msr; | 390 | wd->perfctr_msr = perfctr_msr; |
387 | wd->evntsel_msr = evntsel_msr; | 391 | wd->evntsel_msr = evntsel_msr; |
388 | wd->cccr_msr = 0; /* unused */ | 392 | wd->cccr_msr = 0; /* unused */ |
393 | |||
394 | /* ok, everything is initialized, announce that we're set */ | ||
395 | cpu_nmi_set_wd_enabled(); | ||
396 | |||
397 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
398 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
399 | wrmsr(evntsel_msr, evntsel, 0); | ||
400 | |||
389 | return 1; | 401 | return 1; |
390 | } | 402 | } |
391 | 403 | ||
@@ -432,6 +444,27 @@ static const struct wd_ops p6_wd_ops = { | |||
432 | #define P4_CCCR_ENABLE (1 << 12) | 444 | #define P4_CCCR_ENABLE (1 << 12) |
433 | #define P4_CCCR_OVF (1 << 31) | 445 | #define P4_CCCR_OVF (1 << 31) |
434 | 446 | ||
447 | #define P4_CONTROLS 18 | ||
448 | static unsigned int p4_controls[18] = { | ||
449 | MSR_P4_BPU_CCCR0, | ||
450 | MSR_P4_BPU_CCCR1, | ||
451 | MSR_P4_BPU_CCCR2, | ||
452 | MSR_P4_BPU_CCCR3, | ||
453 | MSR_P4_MS_CCCR0, | ||
454 | MSR_P4_MS_CCCR1, | ||
455 | MSR_P4_MS_CCCR2, | ||
456 | MSR_P4_MS_CCCR3, | ||
457 | MSR_P4_FLAME_CCCR0, | ||
458 | MSR_P4_FLAME_CCCR1, | ||
459 | MSR_P4_FLAME_CCCR2, | ||
460 | MSR_P4_FLAME_CCCR3, | ||
461 | MSR_P4_IQ_CCCR0, | ||
462 | MSR_P4_IQ_CCCR1, | ||
463 | MSR_P4_IQ_CCCR2, | ||
464 | MSR_P4_IQ_CCCR3, | ||
465 | MSR_P4_IQ_CCCR4, | ||
466 | MSR_P4_IQ_CCCR5, | ||
467 | }; | ||
435 | /* | 468 | /* |
436 | * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | 469 | * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter |
437 | * CRU_ESCR0 (with any non-null event selector) through a complemented | 470 | * CRU_ESCR0 (with any non-null event selector) through a complemented |
@@ -473,6 +506,26 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
473 | evntsel_msr = MSR_P4_CRU_ESCR0; | 506 | evntsel_msr = MSR_P4_CRU_ESCR0; |
474 | cccr_msr = MSR_P4_IQ_CCCR0; | 507 | cccr_msr = MSR_P4_IQ_CCCR0; |
475 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | 508 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); |
509 | |||
510 | /* | ||
511 | * If we're on the kdump kernel or other situation, we may | ||
512 | * still have other performance counter registers set to | ||
513 | * interrupt and they'll keep interrupting forever because | ||
514 | * of the P4_CCCR_OVF quirk. So we need to ACK all the | ||
515 | * pending interrupts and disable all the registers here, | ||
516 | * before reenabling the NMI delivery. Refer to p4_rearm() | ||
517 | * about the P4_CCCR_OVF quirk. | ||
518 | */ | ||
519 | if (reset_devices) { | ||
520 | unsigned int low, high; | ||
521 | int i; | ||
522 | |||
523 | for (i = 0; i < P4_CONTROLS; i++) { | ||
524 | rdmsr(p4_controls[i], low, high); | ||
525 | low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF); | ||
526 | wrmsr(p4_controls[i], low, high); | ||
527 | } | ||
528 | } | ||
476 | } else { | 529 | } else { |
477 | /* logical cpu 1 */ | 530 | /* logical cpu 1 */ |
478 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | 531 | perfctr_msr = MSR_P4_IQ_PERFCTR1; |
@@ -499,12 +552,17 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
499 | wrmsr(evntsel_msr, evntsel, 0); | 552 | wrmsr(evntsel_msr, evntsel, 0); |
500 | wrmsr(cccr_msr, cccr_val, 0); | 553 | wrmsr(cccr_msr, cccr_val, 0); |
501 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); | 554 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); |
502 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 555 | |
503 | cccr_val |= P4_CCCR_ENABLE; | ||
504 | wrmsr(cccr_msr, cccr_val, 0); | ||
505 | wd->perfctr_msr = perfctr_msr; | 556 | wd->perfctr_msr = perfctr_msr; |
506 | wd->evntsel_msr = evntsel_msr; | 557 | wd->evntsel_msr = evntsel_msr; |
507 | wd->cccr_msr = cccr_msr; | 558 | wd->cccr_msr = cccr_msr; |
559 | |||
560 | /* ok, everything is initialized, announce that we're set */ | ||
561 | cpu_nmi_set_wd_enabled(); | ||
562 | |||
563 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
564 | cccr_val |= P4_CCCR_ENABLE; | ||
565 | wrmsr(cccr_msr, cccr_val, 0); | ||
508 | return 1; | 566 | return 1; |
509 | } | 567 | } |
510 | 568 | ||
@@ -620,13 +678,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
620 | wrmsr(evntsel_msr, evntsel, 0); | 678 | wrmsr(evntsel_msr, evntsel, 0); |
621 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 679 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
622 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); | 680 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); |
623 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
624 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
625 | wrmsr(evntsel_msr, evntsel, 0); | ||
626 | 681 | ||
627 | wd->perfctr_msr = perfctr_msr; | 682 | wd->perfctr_msr = perfctr_msr; |
628 | wd->evntsel_msr = evntsel_msr; | 683 | wd->evntsel_msr = evntsel_msr; |
629 | wd->cccr_msr = 0; /* unused */ | 684 | wd->cccr_msr = 0; /* unused */ |
685 | |||
686 | /* ok, everything is initialized, announce that we're set */ | ||
687 | cpu_nmi_set_wd_enabled(); | ||
688 | |||
689 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
690 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
691 | wrmsr(evntsel_msr, evntsel, 0); | ||
630 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); | 692 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); |
631 | return 1; | 693 | return 1; |
632 | } | 694 | } |
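All four setup paths (K7, P6, P4 and architectural perfmon) get the same reordering: the wd structure is filled in and cpu_nmi_set_wd_enabled() is called before the LVTPC entry is unmasked and the counter-enable bit is written. Presumably this closes the window in which a perfctr NMI could fire against half-initialised watchdog state; previously the counter was armed first and the bookkeeping filled in afterwards.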
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c new file mode 100644 index 000000000000..5abbea297e0c --- /dev/null +++ b/arch/x86/kernel/cpu/powerflags.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Strings for the various x86 power flags | ||
3 | * | ||
4 | * This file must not contain any executable code. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cpufeature.h> | ||
8 | |||
9 | const char *const x86_power_flags[32] = { | ||
10 | "ts", /* temperature sensor */ | ||
11 | "fid", /* frequency id control */ | ||
12 | "vid", /* voltage id control */ | ||
13 | "ttp", /* thermal trip */ | ||
14 | "tm", | ||
15 | "stc", | ||
16 | "100mhzsteps", | ||
17 | "hwpstate", | ||
18 | "", /* tsc invariant mapped to constant_tsc */ | ||
19 | /* nothing */ | ||
20 | }; | ||
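These strings label the bits of the advanced power management leaf, CPUID 0x80000007 EDX, for the "power management:" line of /proc/cpuinfo; the empty slot for bit 8 is deliberate, since the invariant-TSC bit is reported as constant_tsc among the ordinary feature flags instead. A user-space sketch that prints the same names for whatever bits are set, assuming a GCC/Clang <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

static const char *const power_flags[32] = {
	"ts", "fid", "vid", "ttp", "tm", "stc", "100mhzsteps", "hwpstate",
	"",			/* bit 8: invariant TSC, shown as constant_tsc elsewhere */
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0x80000000, NULL) < 0x80000007)
		return 0;	/* no advanced power management leaf */

	__cpuid(0x80000007, eax, ebx, ecx, edx);
	for (int i = 0; i < 32; i++)
		if ((edx & (1u << i)) && power_flags[i] && power_flags[i][0])
			printf("%s ", power_flags[i]);
	putchar('\n');
	return 0;
}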
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index b911a2c61b8f..52b3fefbd5af 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -5,6 +5,18 @@ | |||
5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | #include "cpu.h" | 6 | #include "cpu.h" |
7 | 7 | ||
8 | static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) | ||
9 | { | ||
10 | u32 xlvl; | ||
11 | |||
12 | /* Transmeta-defined flags: level 0x80860001 */ | ||
13 | xlvl = cpuid_eax(0x80860000); | ||
14 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
15 | if (xlvl >= 0x80860001) | ||
16 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
17 | } | ||
18 | } | ||
19 | |||
8 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | 20 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) |
9 | { | 21 | { |
10 | unsigned int cap_mask, uk, max, dummy; | 22 | unsigned int cap_mask, uk, max, dummy; |
@@ -12,7 +24,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
12 | unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev; | 24 | unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev; |
13 | char cpu_info[65]; | 25 | char cpu_info[65]; |
14 | 26 | ||
15 | get_model_name(c); /* Same as AMD/Cyrix */ | 27 | early_init_transmeta(c); |
28 | |||
16 | display_cacheinfo(c); | 29 | display_cacheinfo(c); |
17 | 30 | ||
18 | /* Print CMS and CPU revision */ | 31 | /* Print CMS and CPU revision */ |
@@ -85,23 +98,12 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
85 | #endif | 98 | #endif |
86 | } | 99 | } |
87 | 100 | ||
88 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c) | ||
89 | { | ||
90 | u32 xlvl; | ||
91 | |||
92 | /* Transmeta-defined flags: level 0x80860001 */ | ||
93 | xlvl = cpuid_eax(0x80860000); | ||
94 | if ((xlvl & 0xffff0000) == 0x80860000) { | ||
95 | if (xlvl >= 0x80860001) | ||
96 | c->x86_capability[2] = cpuid_edx(0x80860001); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { | 101 | static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { |
101 | .c_vendor = "Transmeta", | 102 | .c_vendor = "Transmeta", |
102 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
104 | .c_early_init = early_init_transmeta, | ||
103 | .c_init = init_transmeta, | 105 | .c_init = init_transmeta, |
104 | .c_identify = transmeta_identify, | 106 | .c_x86_vendor = X86_VENDOR_TRANSMETA, |
105 | }; | 107 | }; |
106 | 108 | ||
107 | cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); | 109 | cpu_dev_register(transmeta_cpu_dev); |
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index b1fc90989d75..e777f79e0960 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = { | |||
19 | } | 19 | } |
20 | }, | 20 | }, |
21 | }, | 21 | }, |
22 | .c_x86_vendor = X86_VENDOR_UMC, | ||
22 | }; | 23 | }; |
23 | 24 | ||
24 | cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); | 25 | cpu_dev_register(umc_cpu_dev); |
25 | 26 | ||