author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/kernel/cpu/mtrr/generic.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/i386/kernel/cpu/mtrr/generic.c')
-rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c  417
1 file changed, 417 insertions(+), 0 deletions(-)
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
new file mode 100644
index 000000000000..a4cce454d09b
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -0,0 +1,417 @@
/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86). */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
        struct mtrr_var_range *var_ranges;
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
        unsigned char enabled;
        mtrr_type def_type;
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
        rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
        rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

static void __init
get_fixed_ranges(mtrr_type * frs)
{
        unsigned int *p = (unsigned int *) frs;
        int i;

        rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

        for (i = 0; i < 2; i++)
                rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
        for (i = 0; i < 8; i++)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
        unsigned int i;
        struct mtrr_var_range *vrs;
        unsigned lo, dummy;

        if (!mtrr_state.var_ranges) {
                mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
                                                GFP_KERNEL);
                if (!mtrr_state.var_ranges)
                        return;
        }
        vrs = mtrr_state.var_ranges;

        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
        get_fixed_ranges(mtrr_state.fixed_ranges);

        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;
}

/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
        if (mtrr_state.var_ranges)
                kfree(mtrr_state.var_ranges);
        mtrr_state.var_ranges = NULL;
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
        unsigned long mask = smp_changes_mask;

        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
        printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
        printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users, because it's
   quite complicated in some cases and probably not worth it; the best
   error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
        if (wrmsr_safe(msr, a, b) < 0)
                printk(KERN_ERR
                        "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
                        smp_processor_id(), msr, a, b);
}

int generic_get_free_region(unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase;
        unsigned lsize;

        max = num_var_ranges;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return i;
        }
        return -ENOSPC;
}

void generic_get_mtrr(unsigned int reg, unsigned long *base,
                      unsigned int *size, mtrr_type * type)
{
        unsigned int mask_lo, mask_hi, base_lo, base_hi;

        rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
        if ((mask_lo & 0x800) == 0) {
                /* Invalid (i.e. free) range */
                *base = 0;
                *size = 0;
                *type = 0;
                return;
        }

        rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

        /* Work out the shifted address mask. */
        mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
            | mask_lo >> PAGE_SHIFT;

        /* This works correctly if size is a power of two, i.e. a
           contiguous range. */
        *size = -mask_lo;
        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
        *type = base_lo & 0xff;
}

static int set_fixed_ranges(mtrr_type * frs)
{
        unsigned int *p = (unsigned int *) frs;
        int changed = FALSE;
        int i;
        unsigned int lo, hi;

        rdmsr(MTRRfix64K_00000_MSR, lo, hi);
        if (p[0] != lo || p[1] != hi) {
                mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
                changed = TRUE;
        }

        for (i = 0; i < 2; i++) {
                rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
                if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
                        mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
                                   p[3 + i * 2]);
                        changed = TRUE;
                }
        }

        for (i = 0; i < 8; i++) {
                rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
                if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
                        mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
                                   p[7 + i * 2]);
                        changed = TRUE;
                }
        }
        return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made. */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
        unsigned int lo, hi;
        int changed = FALSE;

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
                mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = TRUE;
        }

        rdmsr(MTRRphysMask_MSR(index), lo, hi);

        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
                mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = TRUE;
        }
        return changed;
}

static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/*  [SUMMARY] Set the MTRR state for this CPU.
    <deftype_lo,deftype_hi> The saved contents of the MTRRdefType MSR.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
        unsigned int i;
        unsigned long change_mask = 0;

        for (i = 0; i < num_var_ranges; i++)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;

        if (set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;

        /* Set_mtrr_restore restores the old value of MTRRdefType,
           so to set it we fiddle with the saved value */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
                deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }

        return change_mask;
}


static unsigned long cr4 = 0;
static u32 deftype_lo, deftype_hi;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void)
{
        unsigned long cr0;

        /* Note that this is not ideal, since the cache is only flushed/disabled
           for this CPU while the MTRRs are changed, but changing this requires
           more invasive changes to the way the kernel boots */

        spin_lock(&set_atomicity_lock);

        /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | 0x40000000;  /* set CD flag */
        write_cr0(cr0);
        wbinvd();

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if ( cpu_has_pge ) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        __flush_tlb();

        /* Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}

static void post_set(void)
{
        /* Flush TLBs (no need to flush caches - they are disabled) */
        __flush_tlb();

        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if ( cpu_has_pge )
                write_cr4(cr4);
        spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
        unsigned long mask, count;
        unsigned long flags;

        local_irq_save(flags);
        prepare_set();

        /* Actually set the state */
        mask = set_mtrr_state(deftype_lo, deftype_hi);

        post_set();
        local_irq_restore(flags);

        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof mask * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }

}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
        unsigned long flags;

        local_irq_save(flags);
        prepare_set();

        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
                mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
        } else {
                mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
                           (base & size_and_mask) >> (32 - PAGE_SHIFT));
                mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
                           (-size & size_and_mask) >> (32 - PAGE_SHIFT));
        }

        post_set();
        local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
        unsigned long lbase, last;

        /* For Intel PPro stepping <= 7, must be 4 MiB aligned
           and not touch 0x70000000->0x7003FFFF */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }

        if (base + size < 0x100) {
                printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
                       base, size);
                return -EINVAL;
        }
        /* Check that the upper bits of base and last are equal and that the
           lower bits are 0 for base and 1 for last */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1) ;
        if (lbase != last) {
                printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                       base, size);
                return -EINVAL;
        }
        return 0;
}


static int generic_have_wrcomb(void)
{
        unsigned long config, dummy;
        rdmsr(MTRRcap_MSR, config, dummy);
        return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
        return 1;
}

/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
        .use_intel_if      = 1,
        .set_all           = generic_set_all,
        .get               = generic_get_mtrr,
        .get_free_region   = generic_get_free_region,
        .set               = generic_set_mtrr,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb       = generic_have_wrcomb,
};