author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/kernel/cpu/mtrr
tags      Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/i386/kernel/cpu/mtrr')
-rw-r--r--  arch/i386/kernel/cpu/mtrr/Makefile   |    5
-rw-r--r--  arch/i386/kernel/cpu/mtrr/amd.c      |  121
-rw-r--r--  arch/i386/kernel/cpu/mtrr/centaur.c  |  223
-rw-r--r--  arch/i386/kernel/cpu/mtrr/changelog  |  229
-rw-r--r--  arch/i386/kernel/cpu/mtrr/cyrix.c    |  364
-rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c  |  417
-rw-r--r--  arch/i386/kernel/cpu/mtrr/if.c       |  374
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c     |  693
-rw-r--r--  arch/i386/kernel/cpu/mtrr/mtrr.h     |   98
-rw-r--r--  arch/i386/kernel/cpu/mtrr/state.c    |   78
10 files changed, 2602 insertions(+), 0 deletions(-)
diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
new file mode 100644
index 000000000000..a25b701ab84e
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/Makefile
@@ -0,0 +1,5 @@
obj-y	:= main.o if.o generic.o state.o
obj-y	+= amd.o
obj-y	+= cyrix.o
obj-y	+= centaur.o

diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c
new file mode 100644
index 000000000000..1a1e04b6fd00
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/amd.c
@@ -0,0 +1,121 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
	     unsigned int *size, mtrr_type * type)
{
	unsigned long low, high;

	rdmsr(MSR_K6_UWCCR, low, high);
	/* Upper dword is region 1, lower is region 0 */
	if (reg == 1)
		low = high;
	/* The base is in bits 31:17; mask it off and convert to pages */
	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
	*type = 0;
	if (low & 1)
		*type = MTRR_TYPE_UNCACHABLE;
	if (low & 2)
		*type = MTRR_TYPE_WRCOMB;
	if (!(low & 3)) {
		*size = 0;
		return;
	}
	/*
	 * This needs a little explaining. The size is stored as an
	 * inverted mask of bits of 128K granularity, 15 bits long,
	 * offset by 2 bits.
	 *
	 * So to get a size we invert the mask and add 1 to the lowest
	 * mask bit (4, as it's 2 bits in). This gives us a size we then
	 * shift to turn into 128K blocks.
	 *
	 * e.g. 111 1111 1111 1100 is 512K
	 *
	 * invert	000 0000 0000 0011
	 * +1		000 0000 0000 0100
	 * *128K	...
	 */
	low = (~low) & 0x1FFFC;
	*size = (low + 4) << (15 - PAGE_SHIFT);
	return;
}
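/*
 * Editor's illustration (not part of the original commit): decoding the
 * UWCCR size field for the 512K example above, assuming PAGE_SHIFT == 12.
 * The 15-bit mask 111 1111 1111 1100 sits at bit 2, so low = 0x1FFF0:
 *
 *	~low & 0x1FFFC	= 0x0000C
 *	+ 4		= 0x00010	(16, in units of 32K = 2^15 bytes)
 *	<< (15 - 12)	= 128 pages	= 512K
 */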

static void amd_set_mtrr(unsigned int reg, unsigned long base,
			 unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	u32 regs[2];

	/*
	 * Low is MTRR0, high is MTRR1
	 */
	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
	/*
	 * Blank to disable
	 */
	if (size == 0)
		regs[reg] = 0;
	else
		/* Set the register to the base, the type (off by one) and an
		   inverted bitmask of the size. The size is the only odd
		   bit. We are fed, say, 512K. We invert this and we get
		   111 1111 1111 1011, but if you subtract one and invert
		   you get the desired 111 1111 1111 1100 mask.

		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
		    | (base << PAGE_SHIFT) | (type + 1);

	/*
	 * The writeback rule is quite specific. See the manual. It is:
	 * disable local interrupts, write back the cache, set the MTRR.
	 */
	wbinvd();
	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
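/*
 * Editor's illustration (not part of the original commit): the negation
 * trick above, worked for a 512K region with PAGE_SHIFT == 12, i.e.
 * size = 128 pages:
 *
 *	-size		= 0xFFFFFF80
 *	>> (15 - 12)	= 0x1FFFFFF0
 *	& 0x0001FFFC	= 0x0001FFF0
 *
 * which is exactly the inverted 128K-block mask 111 1111 1111 1100
 * placed at bit 2, with no explicit subtract-and-invert step needed.
 */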

static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	/* Apply the K6 block alignment and size rules
	   In order:
	   o Uncached or gathering (write-combining) only
	   o 128K or bigger block
	   o Power of 2 block
	   o base suitably aligned to the power
	 */
	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
	    || (size & ~(size - 1)) - size || (base & (size - 1)))
		return -EINVAL;
	return 0;
}
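/*
 * Editor's note (not part of the original commit): size & ~(size - 1)
 * isolates the lowest set bit of size (~(size - 1) == -size), so the
 * expression (size & ~(size - 1)) - size is non-zero exactly when size
 * has more than one bit set, e.g. for size = 0x60 pages:
 *
 *	size - 1 = 0x5F,  size & ~0x5F = 0x20,  0x20 - 0x60 != 0
 *
 * so a non-power-of-two region is rejected with -EINVAL.
 */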

static struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}

//arch_initcall(amd_init_mtrr);
diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c
new file mode 100644
index 000000000000..33f00ac314ef
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/centaur.c
@@ -0,0 +1,223 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include "mtrr.h"

static struct {
	unsigned long high;
	unsigned long low;
} centaur_mcr[8];

static u8 centaur_mcr_reserved;
static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */

/*
 * Report boot time MCR setups
 */

static int
centaur_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else a negative error code.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;

	max = num_var_ranges;
	for (i = 0; i < max; ++i) {
		if (centaur_mcr_reserved & (1 << i))
			continue;
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

void
mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
	centaur_mcr[mcr].low = lo;
	centaur_mcr[mcr].high = hi;
}

static void
centaur_get_mcr(unsigned int reg, unsigned long *base,
		unsigned int *size, mtrr_type * type)
{
	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
	*type = MTRR_TYPE_WRCOMB;	/* If it is there, it is write-combining */
	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
		*type = MTRR_TYPE_UNCACHABLE;
	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
		*type = MTRR_TYPE_WRBACK;
	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
		*type = MTRR_TYPE_WRBACK;
}

static void centaur_set_mcr(unsigned int reg, unsigned long base,
			    unsigned long size, mtrr_type type)
{
	unsigned long low, high;

	if (size == 0) {
		/* Disable */
		high = low = 0;
	} else {
		high = base << PAGE_SHIFT;
		if (centaur_mcr_type == 0)
			low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
		else {
			if (type == MTRR_TYPE_UNCACHABLE)
				low = -size << PAGE_SHIFT | 0x02;	/* NC */
			else
				low = -size << PAGE_SHIFT | 0x09;	/* WWO, WC */
		}
	}
	centaur_mcr[reg].high = high;
	centaur_mcr[reg].low = low;
	wrmsr(MSR_IDT_MCR0 + reg, low, high);
}
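/*
 * Editor's illustration (not part of the original commit): an MCR pairs
 * the physical base (high word) with a two's-complement size mask plus
 * control bits (low word). E.g. a 64K write-combining region at 0xA0000
 * on a WinChip2, with PAGE_SHIFT == 12:
 *
 *	high = 0xA0 << 12          = 0x000A0000
 *	low  = (-16 << 12) | 0x09  = 0xFFFF0009
 *
 * and centaur_get_mcr() recovers the size as
 * -(0xFFFF0009 & 0xfffff000) >> 12 = 16 pages.
 */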

#if 0
/*
 * Initialise the later (saner) Winchip MCR variant. In this version
 * the BIOS can pass us the registers it has used (but not their values)
 * and the control register is read/write
 */

static void __init
centaur_mcr1_init(void)
{
	unsigned i;
	u32 lo, hi;

	/* Unfortunately, MCR's are read-only, so there is no way to
	 * find out what the bios might have done.
	 */

	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	if (((lo >> 17) & 7) == 1) {	/* Type 1 Winchip2 MCR */
		lo &= ~0x1C0;	/* clear key */
		lo |= 0x040;	/* set key to 1 */
		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
	}

	centaur_mcr_type = 1;

	/*
	 * Clear any unconfigured MCR's.
	 */

	for (i = 0; i < 8; ++i) {
		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
			if (!(lo & (1 << (9 + i))))
				wrmsr(MSR_IDT_MCR0 + i, 0, 0);
			else
				/*
				 * If the BIOS set up an MCR we cannot see it
				 * but we don't wish to obliterate it
				 */
				centaur_mcr_reserved |= (1 << i);
		}
	}
	/*
	 * Throw the main write-combining switch...
	 * However if OOSTORE is enabled then people have already done far
	 * cleverer things and we should behave.
	 */

	lo |= 15;	/* Write combine enables */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}

/*
 * Initialise the original WinChip, with read-only MCR registers, no
 * used-bitmask for the BIOS to pass on, and a write-only control register
 */

static void __init
centaur_mcr0_init(void)
{
	unsigned i;

	/* Unfortunately, MCR's are read-only, so there is no way to
	 * find out what the bios might have done.
	 */

	/* Clear any unconfigured MCR's.
	 * This way we are sure that the centaur_mcr array contains the actual
	 * values. The disadvantage is that any BIOS tweaks are thus undone.
	 */
	for (i = 0; i < 8; ++i) {
		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
			wrmsr(MSR_IDT_MCR0 + i, 0, 0);
	}

	wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
}

/*
 * Initialise Winchip series MCR registers
 */

static void __init
centaur_mcr_init(void)
{
	struct set_mtrr_context ctxt;

	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	if (boot_cpu_data.x86_model == 4)
		centaur_mcr0_init();
	else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
		centaur_mcr1_init();

	set_mtrr_done(&ctxt);
}
#endif

static int centaur_validate_add_page(unsigned long base,
				     unsigned long size, unsigned int type)
{
	/*
	 * FIXME: Winchip2 supports uncached
	 */
	if (type != MTRR_TYPE_WRCOMB &&
	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
		printk(KERN_WARNING
		       "mtrr: only write-combining%s supported\n",
		       centaur_mcr_type ? " and uncacheable are"
					: " is");
		return -EINVAL;
	}
	return 0;
}

static struct mtrr_ops centaur_mtrr_ops = {
	.vendor            = X86_VENDOR_CENTAUR,
//	.init              = centaur_mcr_init,
	.set               = centaur_set_mcr,
	.get               = centaur_get_mcr,
	.get_free_region   = centaur_get_free_region,
	.validate_add_page = centaur_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init centaur_init_mtrr(void)
{
	set_mtrr_ops(&centaur_mtrr_ops);
	return 0;
}

//arch_initcall(centaur_init_mtrr);
diff --git a/arch/i386/kernel/cpu/mtrr/changelog b/arch/i386/kernel/cpu/mtrr/changelog
new file mode 100644
index 000000000000..af1368535955
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/changelog
@@ -0,0 +1,229 @@
ChangeLog

Prehistory  Martin Tischhäuser <martin@ikcbarka.fzk.de>
	Initial register-setting code (from proform-1.0).
19971216  Richard Gooch <rgooch@atnf.csiro.au>
	Original version for /proc/mtrr interface, SMP-safe.
  v1.0
19971217  Richard Gooch <rgooch@atnf.csiro.au>
	Bug fix for ioctl()'s.
	Added sample code in Documentation/mtrr.txt
  v1.1
19971218  Richard Gooch <rgooch@atnf.csiro.au>
	Disallow overlapping regions.
19971219  Jens Maurer <jmaurer@menuett.rhein-main.de>
	Register-setting fixups.
  v1.2
19971222  Richard Gooch <rgooch@atnf.csiro.au>
	Fixups for kernel 2.1.75.
  v1.3
19971229  David Wragg <dpw@doc.ic.ac.uk>
	Register-setting fixups and conformity with Intel conventions.
19971229  Richard Gooch <rgooch@atnf.csiro.au>
	Cosmetic changes and wrote this ChangeLog ;-)
19980106  Richard Gooch <rgooch@atnf.csiro.au>
	Fixups for kernel 2.1.78.
  v1.4
19980119  David Wragg <dpw@doc.ic.ac.uk>
	Included passive-release enable code (elsewhere in PCI setup).
  v1.5
19980131  Richard Gooch <rgooch@atnf.csiro.au>
	Replaced global kernel lock with private spinlock.
  v1.6
19980201  Richard Gooch <rgooch@atnf.csiro.au>
	Added wait for other CPUs to complete changes.
  v1.7
19980202  Richard Gooch <rgooch@atnf.csiro.au>
	Bug fix in definition of <set_mtrr> for UP.
  v1.8
19980319  Richard Gooch <rgooch@atnf.csiro.au>
	Fixups for kernel 2.1.90.
19980323  Richard Gooch <rgooch@atnf.csiro.au>
	Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
  v1.9
19980325  Richard Gooch <rgooch@atnf.csiro.au>
	Fixed test for overlapping regions: confused by adjacent regions
19980326  Richard Gooch <rgooch@atnf.csiro.au>
	Added wbinvd in <set_mtrr_prepare>.
19980401  Richard Gooch <rgooch@atnf.csiro.au>
	Bug fix for non-SMP compilation.
19980418  David Wragg <dpw@doc.ic.ac.uk>
	Fixed-MTRR synchronisation for SMP and use atomic operations
	instead of spinlocks.
19980418  Richard Gooch <rgooch@atnf.csiro.au>
	Differentiate different MTRR register classes for BIOS fixup.
  v1.10
19980419  David Wragg <dpw@doc.ic.ac.uk>
	Bug fix in variable MTRR synchronisation.
  v1.11
19980419  Richard Gooch <rgooch@atnf.csiro.au>
	Fixups for kernel 2.1.97.
  v1.12
19980421  Richard Gooch <rgooch@atnf.csiro.au>
	Safer synchronisation across CPUs when changing MTRRs.
  v1.13
19980423  Richard Gooch <rgooch@atnf.csiro.au>
	Bugfix for SMP systems without MTRR support.
  v1.14
19980427  Richard Gooch <rgooch@atnf.csiro.au>
	Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
  v1.15
19980427  Richard Gooch <rgooch@atnf.csiro.au>
	Use atomic bitops for setting SMP change mask.
  v1.16
19980428  Richard Gooch <rgooch@atnf.csiro.au>
	Removed spurious diagnostic message.
  v1.17
19980429  Richard Gooch <rgooch@atnf.csiro.au>
	Moved register-setting macros into this file.
	Moved setup code from init/main.c to i386-specific areas.
  v1.18
19980502  Richard Gooch <rgooch@atnf.csiro.au>
	Moved MTRR detection outside conditionals in <mtrr_init>.
  v1.19
19980502  Richard Gooch <rgooch@atnf.csiro.au>
	Documentation improvement: mention Pentium II and AGP.
  v1.20
19980521  Richard Gooch <rgooch@atnf.csiro.au>
	Only manipulate interrupt enable flag on local CPU.
	Allow enclosed uncachable regions.
  v1.21
19980611  Richard Gooch <rgooch@atnf.csiro.au>
	Always define <main_lock>.
  v1.22
19980901  Richard Gooch <rgooch@atnf.csiro.au>
	Removed module support in order to tidy up code.
	Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
	Created addition queue for prior to SMP commence.
  v1.23
19980902  Richard Gooch <rgooch@atnf.csiro.au>
	Ported patch to kernel 2.1.120-pre3.
  v1.24
19980910  Richard Gooch <rgooch@atnf.csiro.au>
	Removed sanity checks and addition queue: Linus prefers an OOPS.
  v1.25
19981001  Richard Gooch <rgooch@atnf.csiro.au>
	Fixed harmless compiler warning in include/asm-i386/mtrr.h
	Fixed version numbering and history for v1.23 -> v1.24.
  v1.26
19990118  Richard Gooch <rgooch@atnf.csiro.au>
	Added devfs support.
  v1.27
19990123  Richard Gooch <rgooch@atnf.csiro.au>
	Changed locking to spin with reschedule.
	Made use of new <smp_call_function>.
  v1.28
19990201  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Extended the driver to be able to use Cyrix style ARRs.
19990204  Richard Gooch <rgooch@atnf.csiro.au>
	Restructured Cyrix support.
  v1.29
19990204  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Refined ARR support: enable MAPEN in set_mtrr_prepare()
	and disable MAPEN in set_mtrr_done().
19990205  Richard Gooch <rgooch@atnf.csiro.au>
	Minor cleanups.
  v1.30
19990208  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Protect plain 6x86s (and other processors without the
	Page Global Enable feature) against accessing CR4 in
	set_mtrr_prepare() and set_mtrr_done().
19990210  Richard Gooch <rgooch@atnf.csiro.au>
	Turned <set_mtrr_up> and <get_mtrr> into function pointers.
  v1.31
19990212  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Major rewrite of cyrix_arr_init(): do not touch ARRs,
	leave them as the BIOS has set them up.
	Enable usage of all 8 ARRs.
	Avoid multiplications by 3 everywhere and other
	code clean ups/speed ups.
19990213  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Set up other Cyrix processors identical to the boot cpu.
	Since Cyrix don't support Intel APIC, this is l'art pour l'art.
	Weigh ARRs by size:
	If size <= 32M is given, set up ARR# we were given.
	If size > 32M is given, set up ARR7 only if it is free,
	fail otherwise.
19990214  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Also check for size >= 256K if we are to set up ARR7,
	mtrr_add() returns the value it gets from set_mtrr()
19990218  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Remove Cyrix "coma bug" workaround from here.
	Moved to linux/arch/i386/kernel/setup.c and
	linux/include/asm-i386/bugs.h
19990228  Richard Gooch <rgooch@atnf.csiro.au>
	Added MTRRIOC_KILL_ENTRY ioctl(2)
	Trap for counter underflow in <mtrr_file_del>.
	Trap for 4 MiB aligned regions for PPro, stepping <= 7.
19990301  Richard Gooch <rgooch@atnf.csiro.au>
	Created <get_free_region> hook.
19990305  Richard Gooch <rgooch@atnf.csiro.au>
	Temporarily disable AMD support now MTRR capability flag is set.
  v1.32
19990308  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Adjust my changes (19990212-19990218) to Richard Gooch's
	latest changes. (19990228-19990305)
  v1.33
19990309  Richard Gooch <rgooch@atnf.csiro.au>
	Fixed typo in <printk> message.
19990310  Richard Gooch <rgooch@atnf.csiro.au>
	Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
  v1.34
19990511  Bart Hartgers <bart@etpmod.phys.tue.nl>
	Support Centaur C6 MCR's.
19990512  Richard Gooch <rgooch@atnf.csiro.au>
	Minor cleanups.
  v1.35
19990707  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Check whether ARR3 is protected in cyrix_get_free_region()
	and mtrr_del(). The code won't attempt to delete or change it
	from now on if the BIOS protected ARR3. It silently skips ARR3
	in cyrix_get_free_region() or returns with an error code from
	mtrr_del().
19990711  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
	if ARR3 isn't protected. This is needed because if SMM is active
	and ARR3 isn't protected then deleting and setting ARR3 again
	may lock up the processor. With SMM entirely disabled, it does
	not happen.
19990812  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Rearrange switch() statements so the driver accommodates the
	fact that the AMD Athlon handles its MTRRs the same way
	as Intel does.
19990814  Zoltán Böszörményi <zboszor@mail.externet.hu>
	Double check for Intel in mtrr_add()'s big switch() because
	that revision check is only valid for Intel CPUs.
19990819  Alan Cox <alan@redhat.com>
	Tested Zoltan's changes on a pre-production Athlon - 100%
	success.
19991008  Manfred Spraul <manfreds@colorfullife.com>
	Replaced spin_lock_reschedule() with a normal semaphore.
  v1.36
20000221  Richard Gooch <rgooch@atnf.csiro.au>
	Compile fix if procfs and devfs not enabled.
	Formatting changes.
  v1.37
20001109  H. Peter Anvin <hpa@zytor.com>
	Use the new centralized CPU feature detects.

  v1.38
20010309  Dave Jones <davej@suse.de>
	Add support for Cyrix III.

  v1.39
20010312  Dave Jones <davej@suse.de>
	Ugh, I broke AMD support.
	Reworked fix by Troels Walsted Hansen <troels@thule.no>

  v1.40
20010327  Dave Jones <davej@suse.de>
	Adapted Cyrix III support to include VIA C3.

  v2.0
20020306  Patrick Mochel <mochel@osdl.org>
	Split mtrr.c -> mtrr/*.c
	Converted to Linux Kernel Coding Style
	Fixed several minor nits in form
	Moved some SMP-only functions out, so they can be used
	for power management in the future.
	TODO: Fix user interface cruft.
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
new file mode 100644
index 000000000000..933b0dd62f48
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -0,0 +1,364 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include "mtrr.h"

int arr3_protected;

static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned int *size, mtrr_type * type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if they were enabled previously */
	local_irq_restore(flags);
	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}
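/*
 * Editor's illustration (not part of the original commit): an ARR packs
 * a page-aligned base into three byte registers and a size exponent into
 * the low nibble of the third, so for ARR0-ARR6 size = 4K << (shift - 1)
 * and for ARR7 size = 256K << (shift - 1). E.g. shift = 5 on ARR2 gives
 * 1 << 4 = 16 pages = 64K, while shift = 3 on ARR7 gives
 * 0x40 << 2 = 256 pages = 1M.
 */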

static int
cyrix_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free ARR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else a negative error code.
*/
{
	int i;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;

	/* If we are to set up a region >32M then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/* Else try ARR0-ARR6 first */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/* None of ARR0-ARR6 is free; try ARR7, but its size must be at least 256K */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}

static u32 cr4 = 0;
static u32 ccr3;

static void prepare_set(void)
{
	u32 cr0;

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~(1 << 7));
	}

	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
	   a side-effect */
	cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
}

static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86(CX86_CCR3, ccr3);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
}

static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
	for (arr_size = 0; size; arr_size++, size >>= 1) ;

	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}
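/*
 * Editor's illustration (not part of the original commit): the loop above
 * turns a page count into the ARR size exponent, arr_size =
 * floor(log2(size)) + 1. A 1M region on ARR7: size = 256 pages, >> 6
 * leaves 4, and the loop runs three times, so arr_size = 3
 * (256K << (3 - 1) = 1M, matching cyrix_get_arr()).
 */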

typedef struct {
	unsigned long base;
	unsigned int size;
	mtrr_type type;
} arr_state_t;

static arr_state_t arr_state[8] __initdata = {
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

static unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };

static void cyrix_set_all(void)
{
	int i;

	prepare_set();

	/* the CCRs are not contiguous */
	for (i = 0; i < 4; i++)
		setCx86(CX86_CCR0 + i, ccr_state[i]);
	for (; i < 7; i++)
		setCx86(CX86_CCR4 + i, ccr_state[i]);
	for (i = 0; i < 8; i++)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);

	post_set();
}

#if 0
/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has a connection
 * with SMM (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 * if it is set, write a warning message: ARR3 cannot be changed!
 * (it cannot be changed until the next processor reset)
 * if it is reset, then we can change it, set all the needed bits:
 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 * - disable access to SMM memory (CCR1 bit 2 reset)
 * - disable SMM mode (CCR1 bit 1 reset)
 * - disable write protection of ARR3 (CCR6 bit 1 reset)
 * - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	if (ccr[3] & 1) {
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
		 * access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}
	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}
#ifdef CONFIG_SMP
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
	if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
	if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
	if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif

static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
//	.init              = cyrix_arr_init,
	.set_all           = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}

//arch_initcall(cyrix_init_mtrr);
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
new file mode 100644
index 000000000000..a4cce454d09b
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -0,0 +1,417 @@
/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	mtrr_type def_type;
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

static void __init
get_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
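/*
 * Editor's note (not part of the original commit): the eleven fixed-range
 * MSRs read above map the first megabyte: one MSR of 8 x 64K ranges for
 * 0-512K, two MSRs of 8 x 16K ranges for 512K-768K, and eight MSRs of
 * 8 x 4K ranges for 768K-1M, one memory type per byte - 88 types in all,
 * which is what fixed_ranges[NUM_FIXED_RANGES] holds.
 */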

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;
}

/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
	if (mtrr_state.var_ranges)
		kfree(mtrr_state.var_ranges);
	mtrr_state.var_ranges = NULL;
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
		       "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
		       smp_processor_id(), msr, a, b);
}

int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else a negative error code.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned lsize;

	max = num_var_ranges;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

void generic_get_mtrr(unsigned int reg, unsigned long *base,
		      unsigned int *size, mtrr_type * type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
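/*
 * Editor's illustration (not part of the original commit): decoding a
 * 128M region on a CPU with 36 physical address bits and PAGE_SHIFT == 12,
 * so size_or_mask = 0xFF000000 (the bits above the 24-bit page-frame
 * field):
 *
 *	MTRRphysMask = 0xF:F8000800 (valid bit 0x800 set)
 *	mask_lo = 0xFF000000 | (0xF << 20) | (0xF8000800 >> 12)
 *	        = 0xFFFF8000
 *	*size   = -0xFFFF8000 = 0x8000 pages = 128M
 */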

static int set_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;
	int changed = FALSE;
	int i;
	unsigned int lo, hi;

	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
		changed = TRUE;
	}

	for (i = 0; i < 2; i++) {
		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
				   p[3 + i * 2]);
			changed = TRUE;
		}
	}

	for (i = 0; i < 8; i++) {
		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
				   p[7 + i * 2]);
			changed = TRUE;
		}
	}
	return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = FALSE;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = TRUE;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = TRUE;
	}
	return changed;
}

static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU.
   <deftype_lo> <deftype_hi> The saved contents of the MTRRdefType MSR.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4 = 0;
static u32 deftype_lo, deftype_hi;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain. The caller
 * must ensure that local interrupts are disabled and are reenabled after
 * post_set() has been called.
 */

static void prepare_set(void)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}

static void post_set(void)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state(deftype_lo, deftype_hi);

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
	} else {
		mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
			   (base & size_and_mask) >> (32 - PAGE_SHIFT));
		mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
			   (-size & size_and_mask) >> (32 - PAGE_SHIFT));
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < (0x70000000 >> PAGE_SHIFT) ||
		      base > (0x7003FFFF >> PAGE_SHIFT)) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	if (base + size < 0x100) {
		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		       base, size);
		return -EINVAL;
	}
	/* Check that the upper bits of base and last are equal, and the
	   lower bits are 0 for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1) ;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
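/*
 * Editor's illustration (not part of the original commit): the shift loop
 * above checks that base is aligned to size. For base = 0x180 pages,
 * size = 0x100 pages, last = 0x27F: shifting while base is even and last
 * is odd stops at lbase = 0x3, last = 0x4; they differ, so the region is
 * rejected. For base = 0x100, size = 0x100, both converge to 1 and the
 * region is accepted.
 */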


static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
new file mode 100644
index 000000000000..1923e0aed26a
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -0,0 +1,374 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

#define LINE_SIZE 80

#include <asm/mtrr.h>
#include "mtrr.h"

/* RED-PEN: this is accessed without any locking */
extern unsigned int *usage_table;


#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)

static char *mtrr_strings[MTRR_NUM_TYPES] =
{
	"uncachable",		/* 0 */
	"write-combining",	/* 1 */
	"?",			/* 2 */
	"?",			/* 3 */
	"write-through",	/* 4 */
	"write-protect",	/* 5 */
	"write-back",		/* 6 */
};

char *mtrr_attrib_to_str(int x)
{
	return (x <= 6) ? mtrr_strings[x] : "?";
}

#ifdef CONFIG_PROC_FS

static int
mtrr_file_add(unsigned long base, unsigned long size,
	      unsigned int type, char increment, struct file *file, int page)
{
	int reg, max;
	unsigned int *fcount = FILE_FCOUNT(file);

	max = num_var_ranges;
	if (fcount == NULL) {
		fcount = kmalloc(max * sizeof *fcount, GFP_KERNEL);
		if (!fcount)
			return -ENOMEM;
		memset(fcount, 0, max * sizeof *fcount);
		FILE_FCOUNT(file) = fcount;
	}
	if (!page) {
		if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
			return -EINVAL;
		base >>= PAGE_SHIFT;
		size >>= PAGE_SHIFT;
	}
	reg = mtrr_add_page(base, size, type, 1);
	if (reg >= 0)
		++fcount[reg];
	return reg;
}

static int
mtrr_file_del(unsigned long base, unsigned long size,
	      struct file *file, int page)
{
	int reg;
	unsigned int *fcount = FILE_FCOUNT(file);

	if (!page) {
		if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
			return -EINVAL;
		base >>= PAGE_SHIFT;
		size >>= PAGE_SHIFT;
	}
	reg = mtrr_del_page(-1, base, size);
	if (reg < 0)
		return reg;
	if (fcount == NULL)
		return reg;
	if (fcount[reg] < 1)
		return -EINVAL;
	--fcount[reg];
	return reg;
}

/* RED-PEN: seq_file can seek now. this is ignored. */
static ssize_t
mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
/* Format of control line:
   "base=%Lx size=%Lx type=%s" OR:
   "disable=%d"
*/
{
	int i, err;
	unsigned long reg;
	unsigned long long base, size;
	char *ptr;
	char line[LINE_SIZE];
	size_t linelen;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!len)
		return -EINVAL;
	memset(line, 0, LINE_SIZE);
	if (len > LINE_SIZE)
		len = LINE_SIZE;
	if (copy_from_user(line, buf, len - 1))
		return -EFAULT;
	linelen = strlen(line);
	ptr = line + linelen - 1;
	if (linelen && *ptr == '\n')
		*ptr = '\0';
	if (!strncmp(line, "disable=", 8)) {
		reg = simple_strtoul(line + 8, &ptr, 0);
		err = mtrr_del_page(reg, 0, 0);
		if (err < 0)
			return err;
		return len;
	}
	if (strncmp(line, "base=", 5))
		return -EINVAL;
	base = simple_strtoull(line + 5, &ptr, 0);
	for (; isspace(*ptr); ++ptr) ;
	if (strncmp(ptr, "size=", 5))
		return -EINVAL;
	size = simple_strtoull(ptr + 5, &ptr, 0);
	if ((base & 0xfff) || (size & 0xfff))
		return -EINVAL;
	for (; isspace(*ptr); ++ptr) ;
	if (strncmp(ptr, "type=", 5))
		return -EINVAL;
	ptr += 5;
	for (; isspace(*ptr); ++ptr) ;
	for (i = 0; i < MTRR_NUM_TYPES; ++i) {
		if (strcmp(ptr, mtrr_strings[i]))
			continue;
		base >>= PAGE_SHIFT;
		size >>= PAGE_SHIFT;
		err = mtrr_add_page((unsigned long) base, (unsigned long) size,
				    i, 1);
		if (err < 0)
			return err;
		return len;
	}
	return -EINVAL;
}
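/*
 * Editor's usage sketch (not part of the original commit): from userspace,
 * a write-combining mapping for a 4M framebuffer at 0xf8000000 could be
 * requested with:
 *
 *	int fd = open("/proc/mtrr", O_WRONLY);
 *	const char *req = "base=0xf8000000 size=0x400000 type=write-combining\n";
 *	write(fd, req, strlen(req));
 *	close(fd);
 *
 * and released later by writing "disable=<reg>". Both require
 * CAP_SYS_ADMIN, and base/size must be page-aligned, as checked above.
 */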

static int
mtrr_ioctl(struct inode *inode, struct file *file,
	   unsigned int cmd, unsigned long __arg)
{
	int err;
	mtrr_type type;
	struct mtrr_sentry sentry;
	struct mtrr_gentry gentry;
	void __user *arg = (void __user *) __arg;

	switch (cmd) {
	default:
		return -ENOTTY;
	case MTRRIOC_ADD_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
				    file, 0);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_SET_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_DEL_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_file_del(sentry.base, sentry.size, file, 0);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_KILL_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_del(-1, sentry.base, sentry.size);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_GET_ENTRY:
		if (copy_from_user(&gentry, arg, sizeof gentry))
			return -EFAULT;
		if (gentry.regnum >= num_var_ranges)
			return -EINVAL;
		mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);

		/* Hide entries that go above 4GB */
		if (gentry.base + gentry.size > 0x100000
		    || gentry.size == 0x100000)
			gentry.base = gentry.size = gentry.type = 0;
		else {
			gentry.base <<= PAGE_SHIFT;
			gentry.size <<= PAGE_SHIFT;
			gentry.type = type;
		}

		if (copy_to_user(arg, &gentry, sizeof gentry))
			return -EFAULT;
		break;
	case MTRRIOC_ADD_PAGE_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
				    file, 1);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_SET_PAGE_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_DEL_PAGE_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_file_del(sentry.base, sentry.size, file, 1);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_KILL_PAGE_ENTRY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&sentry, arg, sizeof sentry))
			return -EFAULT;
		err = mtrr_del_page(-1, sentry.base, sentry.size);
		if (err < 0)
			return err;
		break;
	case MTRRIOC_GET_PAGE_ENTRY:
		if (copy_from_user(&gentry, arg, sizeof gentry))
			return -EFAULT;
		if (gentry.regnum >= num_var_ranges)
			return -EINVAL;
		mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
		gentry.type = type;

		if (copy_to_user(arg, &gentry, sizeof gentry))
			return -EFAULT;
		break;
	}
	return 0;
}
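/*
 * Editor's usage sketch (not part of the original commit): the same
 * request via the ioctl interface, using struct mtrr_sentry from
 * <asm/mtrr.h> with byte-based base and size:
 *
 *	struct mtrr_sentry s = {
 *		.base = 0xf8000000,
 *		.size = 0x400000,
 *		.type = MTRR_TYPE_WRCOMB,
 *	};
 *	int fd = open("/proc/mtrr", O_RDWR);
 *	ioctl(fd, MTRRIOC_ADD_ENTRY, &s);
 */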

static int
mtrr_close(struct inode *ino, struct file *file)
{
	int i, max;
	unsigned int *fcount = FILE_FCOUNT(file);

	if (fcount != NULL) {
		max = num_var_ranges;
		for (i = 0; i < max; ++i) {
			while (fcount[i] > 0) {
				mtrr_del(i, 0, 0);
				--fcount[i];
			}
		}
		kfree(fcount);
		FILE_FCOUNT(file) = NULL;
	}
	return single_release(ino, file);
}

static int mtrr_seq_show(struct seq_file *seq, void *offset);

static int mtrr_open(struct inode *inode, struct file *file)
{
	if (!mtrr_if)
		return -EIO;
	if (!mtrr_if->get)
		return -ENXIO;
	return single_open(file, mtrr_seq_show, NULL);
}

static struct file_operations mtrr_fops = {
	.owner   = THIS_MODULE,
	.open    = mtrr_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = mtrr_write,
	.ioctl   = mtrr_ioctl,
	.release = mtrr_close,
};


static struct proc_dir_entry *proc_root_mtrr;


static int mtrr_seq_show(struct seq_file *seq, void *offset)
{
	char factor;
	int i, max, len;
	mtrr_type type;
	unsigned long base;
	unsigned int size;

	len = 0;
	max = num_var_ranges;
	for (i = 0; i < max; i++) {
		mtrr_if->get(i, &base, &size, &type);
		if (size == 0)
			usage_table[i] = 0;
		else {
			if (size < (0x100000 >> PAGE_SHIFT)) {
				/* less than 1MB */
				factor = 'K';
				size <<= PAGE_SHIFT - 10;
			} else {
				factor = 'M';
				size >>= 20 - PAGE_SHIFT;
			}
			/* RED-PEN: base can be > 32bit */
			len += seq_printf(seq,
				   "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n",
				   i, base, base >> (20 - PAGE_SHIFT), size, factor,
				   mtrr_attrib_to_str(type), usage_table[i]);
		}
	}
	return 0;
}

static int __init mtrr_if_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
		return -ENODEV;

	proc_root_mtrr =
	    create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root);
	if (proc_root_mtrr) {
		proc_root_mtrr->owner = THIS_MODULE;
		proc_root_mtrr->proc_fops = &mtrr_fops;
	}
	return 0;
}

arch_initcall(mtrr_if_init);
#endif			/* CONFIG_PROC_FS */
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
new file mode 100644
index 000000000000..8f67b490a7fd
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -0,0 +1,693 @@
1/* Generic MTRR (Memory Type Range Register) driver.
2
3 Copyright (C) 1997-2000 Richard Gooch
4 Copyright (c) 2002 Patrick Mochel
5
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Library General Public
8 License as published by the Free Software Foundation; either
9 version 2 of the License, or (at your option) any later version.
10
11 This library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Library General Public License for more details.
15
16 You should have received a copy of the GNU Library General Public
17 License along with this library; if not, write to the Free
18 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
20 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
21 The postal address is:
22 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
23
24 Source: "Pentium Pro Family Developer's Manual, Volume 3:
25 Operating System Writer's Guide" (Intel document number 242692),
26 section 11.11.7
27
28 This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
29 on 6-7 March 2002.
30 Source: Intel Architecture Software Developers Manual, Volume 3:
31 System Programming Guide; Section 9.11. (1997 edition - PPro).
32*/
33
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/pci.h>
37#include <linux/smp.h>
38#include <linux/cpu.h>
39
40#include <asm/mtrr.h>
41
42#include <asm/uaccess.h>
43#include <asm/processor.h>
44#include <asm/msr.h>
45#include "mtrr.h"
46
47#define MTRR_VERSION "2.0 (20020519)"
48
49u32 num_var_ranges = 0;
50
51unsigned int *usage_table;
52static DECLARE_MUTEX(main_lock);
53
54u32 size_or_mask, size_and_mask;
55
56static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
57
58struct mtrr_ops * mtrr_if = NULL;
59
60static void set_mtrr(unsigned int reg, unsigned long base,
61 unsigned long size, mtrr_type type);
62
63extern int arr3_protected;
64
65void set_mtrr_ops(struct mtrr_ops * ops)
66{
67 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
68 mtrr_ops[ops->vendor] = ops;
69}
70
71/* Returns non-zero if we have the write-combining memory type */
72static int have_wrcomb(void)
73{
74 struct pci_dev *dev;
75
76 if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
77 /* ServerWorks LE chipsets have problems with write-combining
78 Don't allow it and leave room for other chipsets to be tagged */
79 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
80 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
81 printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
82 pci_dev_put(dev);
83 return 0;
84 }
85 /* Intel 450NX errata # 23. Non ascending cachline evictions to
86 write combining memory may resulting in data corruption */
87 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
88 dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
89 printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
90 pci_dev_put(dev);
91 return 0;
92 }
93 pci_dev_put(dev);
94 }
95 return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
96}
97
98/* This function returns the number of variable MTRRs */
99static void __init set_num_var_ranges(void)
100{
101 unsigned long config = 0, dummy;
102
103 if (use_intel()) {
104 rdmsr(MTRRcap_MSR, config, dummy);
105 } else if (is_cpu(AMD))
106 config = 2;
107 else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
108 config = 8;
109 num_var_ranges = config & 0xff;
110}
111
112static void __init init_table(void)
113{
114 int i, max;
115
116 max = num_var_ranges;
117 if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
118 == NULL) {
119 printk(KERN_ERR "mtrr: could not allocate\n");
120 return;
121 }
122 for (i = 0; i < max; i++)
123 usage_table[i] = 1;
124}
125
126struct set_mtrr_data {
127 atomic_t count;
128 atomic_t gate;
129 unsigned long smp_base;
130 unsigned long smp_size;
131 unsigned int smp_reg;
132 mtrr_type smp_type;
133};
134
135#ifdef CONFIG_SMP
136
137static void ipi_handler(void *info)
138/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
139 [RETURNS] Nothing.
140*/
141{
142 struct set_mtrr_data *data = info;
143 unsigned long flags;
144
145 local_irq_save(flags);
146
147 atomic_dec(&data->count);
148 while(!atomic_read(&data->gate))
149 cpu_relax();
150
151 /* The master has cleared me to execute */
152 if (data->smp_reg != ~0U)
153 mtrr_if->set(data->smp_reg, data->smp_base,
154 data->smp_size, data->smp_type);
155 else
156 mtrr_if->set_all();
157
158 atomic_dec(&data->count);
159 while(atomic_read(&data->gate))
160 cpu_relax();
161
162 atomic_dec(&data->count);
163 local_irq_restore(flags);
164}
165
166#endif
167
168/**
169 * set_mtrr - update mtrrs on all processors
170 * @reg: mtrr in question
171 * @base: mtrr base
172 * @size: mtrr size
173 * @type: mtrr type
174 *
175 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
176 *
177 * 1. Send IPI to do the following:
178 * 2. Disable Interrupts
179 * 3. Wait for all procs to do so
180 * 4. Enter no-fill cache mode
181 * 5. Flush caches
182 * 6. Clear PGE bit
183 * 7. Flush all TLBs
184 * 8. Disable all range registers
185 * 9. Update the MTRRs
186 * 10. Enable all range registers
187 * 11. Flush all TLBs and caches again
188 * 12. Enter normal cache mode and reenable caching
189 * 13. Set PGE
190 * 14. Wait for buddies to catch up
191 * 15. Enable interrupts.
192 *
193 * What does that mean for us? Well, first we set data.count to the number
194 * of other CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
195 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
196 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
197 * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
198 * differently, so we call the mtrr_if->set() callback and let them take care of it.
199 * When they're done, they again decrement data.count and wait for data.gate to
200 * be reset.
201 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
202 * Everyone then enables interrupts and we all continue on.
203 *
204 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
205 * becomes nops.
206 */
207static void set_mtrr(unsigned int reg, unsigned long base,
208 unsigned long size, mtrr_type type)
209{
210 struct set_mtrr_data data;
211 unsigned long flags;
212
213 data.smp_reg = reg;
214 data.smp_base = base;
215 data.smp_size = size;
216 data.smp_type = type;
217 atomic_set(&data.count, num_booting_cpus() - 1);
218 atomic_set(&data.gate,0);
219
220 /* Start the ball rolling on other CPUs */
221 if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
222 panic("mtrr: timed out waiting for other CPUs\n");
223
224 local_irq_save(flags);
225
226 while(atomic_read(&data.count))
227 cpu_relax();
228
229 /* ok, reset count and toggle gate */
230 atomic_set(&data.count, num_booting_cpus() - 1);
231 atomic_set(&data.gate,1);
232
233 /* do our MTRR business */
234
235 /* HACK!
236 * We use this same function to initialize the mtrrs on boot.
237	 * The state of the boot CPU's MTRRs has been saved, and we want
238	 * to replicate it across all the APs.
239	 * If we're doing that, @reg is set to something special...
240 */
241 if (reg != ~0U)
242 mtrr_if->set(reg,base,size,type);
243
244 /* wait for the others */
245 while(atomic_read(&data.count))
246 cpu_relax();
247
248 atomic_set(&data.count, num_booting_cpus() - 1);
249 atomic_set(&data.gate,0);
250
251 /*
252	 * Wait here for everyone to have seen the gate change,
253	 * so we're the last ones to touch 'data'.
254 */
255 while(atomic_read(&data.count))
256 cpu_relax();
257
258 local_irq_restore(flags);
259}
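/*
 * A minimal timeline of the rendezvous above for one master (M) and one
 * other CPU (A) - this is just the code above re-drawn, not an extra
 * mechanism:
 *
 *	M: count = 1, gate = 0; A disables irqs, decrements count
 *	M: sees count == 0; sets count = 1, gate = 1
 *	A: sees gate set; programs MTRRs, decrements count, spins on gate
 *	M: programs its own MTRRs, waits for count == 0
 *	M: sets count = 1, gate = 0
 *	A: sees gate clear; decrements count, restores irqs
 *	M: waits for count == 0 (so it touches 'data' last), restores irqs
 */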
260
261/**
262 * mtrr_add_page - Add a memory type region
263 * @base: Physical base address of region in pages (4 KB)
264 * @size: Physical size of region in pages (4 KB)
265 * @type: Type of MTRR desired
266 * @increment: If this is true do usage counting on the region
267 *
268 * Memory type region registers control the caching on newer Intel and
269 * non-Intel processors. This function allows drivers to request that
270 * an MTRR be added. The details and hardware specifics of each processor's
271 * implementation are hidden from the caller, but nevertheless the
272 * caller should expect to need to provide a power of two size on an
273 * equivalent power of two boundary.
274 *
275 * If the region cannot be added either because all regions are in use
276 * or the CPU cannot support it a negative value is returned. On success
277 * the register number for this entry is returned, but should be treated
278 * as a cookie only.
279 *
280 * On a multiprocessor machine the changes are made to all processors.
281 * This is required on x86 by the Intel processors.
282 *
283 * The available types are
284 *
285 * %MTRR_TYPE_UNCACHABLE - No caching
286 *
287 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
288 *
289 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
290 *
291 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
292 *
293 * BUGS: Needs a quiet flag for the cases where drivers do not mind
294 * failures and do not wish system log messages to be sent.
295 */
296
297int mtrr_add_page(unsigned long base, unsigned long size,
298 unsigned int type, char increment)
299{
300 int i;
301 mtrr_type ltype;
302 unsigned long lbase;
303 unsigned int lsize;
304 int error;
305
306 if (!mtrr_if)
307 return -ENXIO;
308
309 if ((error = mtrr_if->validate_add_page(base,size,type)))
310 return error;
311
312 if (type >= MTRR_NUM_TYPES) {
313 printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
314 return -EINVAL;
315 }
316
317 /* If the type is WC, check that this processor supports it */
318 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
319 printk(KERN_WARNING
320 "mtrr: your processor doesn't support write-combining\n");
321 return -ENOSYS;
322 }
323
324 if (base & size_or_mask || size & size_or_mask) {
325 printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
326 return -EINVAL;
327 }
328
329 error = -EINVAL;
330
331 /* Search for existing MTRR */
332 down(&main_lock);
333 for (i = 0; i < num_var_ranges; ++i) {
334 mtrr_if->get(i, &lbase, &lsize, &ltype);
335 if (base >= lbase + lsize)
336 continue;
337 if ((base < lbase) && (base + size <= lbase))
338 continue;
339 /* At this point we know there is some kind of overlap/enclosure */
340 if ((base < lbase) || (base + size > lbase + lsize)) {
341 printk(KERN_WARNING
342 "mtrr: 0x%lx000,0x%lx000 overlaps existing"
343 " 0x%lx000,0x%x000\n", base, size, lbase,
344 lsize);
345 goto out;
346 }
347 /* New region is enclosed by an existing region */
348 if (ltype != type) {
349 if (type == MTRR_TYPE_UNCACHABLE)
350 continue;
351 printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
352 base, size, mtrr_attrib_to_str(ltype),
353 mtrr_attrib_to_str(type));
354 goto out;
355 }
356 if (increment)
357 ++usage_table[i];
358 error = i;
359 goto out;
360 }
361 /* Search for an empty MTRR */
362 i = mtrr_if->get_free_region(base, size);
363 if (i >= 0) {
364 set_mtrr(i, base, size, type);
365 usage_table[i] = 1;
366 } else
367 printk(KERN_INFO "mtrr: no more MTRRs available\n");
368 error = i;
369 out:
370 up(&main_lock);
371 return error;
372}
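/*
 * Illustrative call (the addresses are made up): to ask for 16MB of
 * write-combining at 0xf0000000 through the page-based interface:
 *
 *	int reg = mtrr_add_page(0xf0000000UL >> PAGE_SHIFT,
 *				0x1000000UL >> PAGE_SHIFT,
 *				MTRR_TYPE_WRCOMB, 1);
 *
 * Both base and size arguments are in 4K pages, and a negative return
 * means the request was refused.
 */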
373
374/**
375 * mtrr_add - Add a memory type region
376 * @base: Physical base address of region
377 * @size: Physical size of region
378 * @type: Type of MTRR desired
379 * @increment: If this is true do usage counting on the region
380 *
381 * Memory type region registers control the caching on newer Intel and
382 * non-Intel processors. This function allows drivers to request that
383 * an MTRR be added. The details and hardware specifics of each processor's
384 * implementation are hidden from the caller, but nevertheless the
385 * caller should expect to need to provide a power of two size on an
386 * equivalent power of two boundary.
387 *
388 * If the region cannot be added either because all regions are in use
389 * or the CPU cannot support it a negative value is returned. On success
390 * the register number for this entry is returned, but should be treated
391 * as a cookie only.
392 *
393 * On a multiprocessor machine the changes are made to all processors.
394 * This is required on x86 by the Intel processors.
395 *
396 * The available types are
397 *
398 * %MTRR_TYPE_UNCACHABLE - No caching
399 *
400 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
401 *
402 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
403 *
404 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
405 *
406 * BUGS: Needs a quiet flag for the cases where drivers do not mind
407 * failures and do not wish system log messages to be sent.
408 */
409
410int
411mtrr_add(unsigned long base, unsigned long size, unsigned int type,
412 char increment)
413{
414 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
415 printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
416 printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
417 return -EINVAL;
418 }
419 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
420 increment);
421}
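/*
 * Example (fb_base and fb_size belong to a hypothetical framebuffer
 * driver): a typical caller does
 *
 *	int reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
 *
 * treats a negative return as "run uncached" rather than a fatal error,
 * and keeps 'reg' only as a cookie for a later mtrr_del(reg, 0, 0).
 */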
422
423/**
424 * mtrr_del_page - delete a memory type region
425 * @reg: Register returned by mtrr_add
426 * @base: Physical base address
427 * @size: Size of region
428 *
429 * If register is supplied then base and size are ignored. This is
430 * how drivers should call it.
431 *
432 * Releases an MTRR region. If the usage count drops to zero the
433 * register is freed and the region returns to default state.
434 * On success the register is returned, on failure a negative error
435 * code is returned.
436 */
437
438int mtrr_del_page(int reg, unsigned long base, unsigned long size)
439{
440 int i, max;
441 mtrr_type ltype;
442 unsigned long lbase;
443 unsigned int lsize;
444 int error = -EINVAL;
445
446 if (!mtrr_if)
447 return -ENXIO;
448
449 max = num_var_ranges;
450 down(&main_lock);
451 if (reg < 0) {
452 /* Search for existing MTRR */
453 for (i = 0; i < max; ++i) {
454 mtrr_if->get(i, &lbase, &lsize, &ltype);
455 if (lbase == base && lsize == size) {
456 reg = i;
457 break;
458 }
459 }
460 if (reg < 0) {
461 printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
462 size);
463 goto out;
464 }
465 }
466 if (reg >= max) {
467 printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
468 goto out;
469 }
470 if (is_cpu(CYRIX) && !use_intel()) {
471 if ((reg == 3) && arr3_protected) {
472 printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
473 goto out;
474 }
475 }
476 mtrr_if->get(reg, &lbase, &lsize, &ltype);
477 if (lsize < 1) {
478 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
479 goto out;
480 }
481 if (usage_table[reg] < 1) {
482 printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
483 goto out;
484 }
485 if (--usage_table[reg] < 1)
486 set_mtrr(reg, 0, 0, 0);
487 error = reg;
488 out:
489 up(&main_lock);
490 return error;
491}
492/**
493 * mtrr_del - delete a memory type region
494 * @reg: Register returned by mtrr_add
495 * @base: Physical base address
496 * @size: Size of region
497 *
498 * If register is supplied then base and size are ignored. This is
499 * how drivers should call it.
500 *
501 * Releases an MTRR region. If the usage count drops to zero the
502 * register is freed and the region returns to default state.
503 * On success the register is returned, on failure a negative error
504 * code is returned.
505 */
506
507int
508mtrr_del(int reg, unsigned long base, unsigned long size)
509{
510 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
511 printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
512 printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
513 return -EINVAL;
514 }
515 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
516}
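/*
 * Example (continuing the hypothetical mtrr_add() caller above): on
 * teardown the cookie is handed back and base/size are ignored:
 *
 *	if (reg >= 0)
 *		mtrr_del(reg, 0, 0);
 */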
517
518EXPORT_SYMBOL(mtrr_add);
519EXPORT_SYMBOL(mtrr_del);
520
521/* HACK ALERT!
522 * These should be called implicitly, but we can't do that until all the
523 * initcall stuff is done...
524 */
525extern void amd_init_mtrr(void);
526extern void cyrix_init_mtrr(void);
527extern void centaur_init_mtrr(void);
528
529static void __init init_ifs(void)
530{
531 amd_init_mtrr();
532 cyrix_init_mtrr();
533 centaur_init_mtrr();
534}
535
536static void __init init_other_cpus(void)
537{
538 if (use_intel())
539 get_mtrr_state();
540
541 /* bring up the other processors */
542 set_mtrr(~0U,0,0,0);
543
544 if (use_intel()) {
545 finalize_mtrr_state();
546 mtrr_state_warn();
547 }
548}
549
550
551struct mtrr_value {
552 mtrr_type ltype;
553 unsigned long lbase;
554 unsigned int lsize;
555};
556
557static struct mtrr_value * mtrr_state;
558
559static int mtrr_save(struct sys_device * sysdev, u32 state)
560{
561 int i;
562 int size = num_var_ranges * sizeof(struct mtrr_value);
563
564 mtrr_state = kmalloc(size,GFP_ATOMIC);
565 if (mtrr_state)
566 memset(mtrr_state,0,size);
567 else
568 return -ENOMEM;
569
570 for (i = 0; i < num_var_ranges; i++) {
571 mtrr_if->get(i,
572 &mtrr_state[i].lbase,
573 &mtrr_state[i].lsize,
574 &mtrr_state[i].ltype);
575 }
576 return 0;
577}
578
579static int mtrr_restore(struct sys_device * sysdev)
580{
581 int i;
582
583 for (i = 0; i < num_var_ranges; i++) {
584 if (mtrr_state[i].lsize)
585 set_mtrr(i,
586 mtrr_state[i].lbase,
587 mtrr_state[i].lsize,
588 mtrr_state[i].ltype);
589 }
590 kfree(mtrr_state);
591 return 0;
592}
593
594
595
596static struct sysdev_driver mtrr_sysdev_driver = {
597 .suspend = mtrr_save,
598 .resume = mtrr_restore,
599};
600
601
602/**
603 * mtrr_init - initialize mtrrs on the boot CPU
604 *
605 * This needs to be called early, before any of the other CPUs are
606 * initialized (i.e. before smp_init()).
607 *
608 */
609static int __init mtrr_init(void)
610{
611 init_ifs();
612
613 if (cpu_has_mtrr) {
614 mtrr_if = &generic_mtrr_ops;
615 size_or_mask = 0xff000000; /* 36 bits */
616 size_and_mask = 0x00f00000;
617
618 switch (boot_cpu_data.x86_vendor) {
619 case X86_VENDOR_AMD:
620 /* The original Athlon docs said that
621 total addressable memory is 44 bits wide.
622 It was not really clear whether its MTRRs
623 follow this or not. (Read: 44 or 36 bits).
624 However, "x86-64_overview.pdf" explicitly
625 states that "previous implementations support
626 36 bit MTRRs" and also provides a way to
627 query the width (in bits) of the physical
628 addressable memory on the Hammer family.
629 */
630 if (boot_cpu_data.x86 == 15
631 && (cpuid_eax(0x80000000) >= 0x80000008)) {
632 u32 phys_addr;
633 phys_addr = cpuid_eax(0x80000008) & 0xff;
634 size_or_mask =
635 ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
636 size_and_mask = ~size_or_mask & 0xfff00000;
637 }
638 /* Athlon MTRRs use an Intel-compatible interface for
639 * getting and setting */
640 break;
641 case X86_VENDOR_CENTAUR:
642 if (boot_cpu_data.x86 == 6) {
643			/* VIA Cyrix family has Intel-style MTRRs, but doesn't support PAE */
644 size_or_mask = 0xfff00000; /* 32 bits */
645 size_and_mask = 0;
646 }
647 break;
648
649 default:
650 break;
651 }
652 } else {
653 switch (boot_cpu_data.x86_vendor) {
654 case X86_VENDOR_AMD:
655 if (cpu_has_k6_mtrr) {
656 /* Pre-Athlon (K6) AMD CPU MTRRs */
657 mtrr_if = mtrr_ops[X86_VENDOR_AMD];
658 size_or_mask = 0xfff00000; /* 32 bits */
659 size_and_mask = 0;
660 }
661 break;
662 case X86_VENDOR_CENTAUR:
663 if (cpu_has_centaur_mcr) {
664 mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
665 size_or_mask = 0xfff00000; /* 32 bits */
666 size_and_mask = 0;
667 }
668 break;
669 case X86_VENDOR_CYRIX:
670 if (cpu_has_cyrix_arr) {
671 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
672 size_or_mask = 0xfff00000; /* 32 bits */
673 size_and_mask = 0;
674 }
675 break;
676 default:
677 break;
678 }
679 }
680 printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
681
682 if (mtrr_if) {
683 set_num_var_ranges();
684 init_table();
685 init_other_cpus();
686
687 return sysdev_driver_register(&cpu_sysdev_class,
688 &mtrr_sysdev_driver);
689 }
690 return -ENXIO;
691}
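/*
 * Worked example for the masks above (assuming a Hammer-family CPU that
 * reports phys_addr = 40 via cpuid 0x80000008): with PAGE_SHIFT == 12,
 * size_or_mask = ~((1 << 28) - 1) = 0xf0000000 and
 * size_and_mask = ~size_or_mask & 0xfff00000 = 0x0ff00000,
 * i.e. 40 physical address bits expressed in page units.
 */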
692
693subsys_initcall(mtrr_init);
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
new file mode 100644
index 000000000000..de1351245599
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h
@@ -0,0 +1,98 @@
1/*
2 * local mtrr defines.
3 */
4
5#ifndef TRUE
6#define TRUE 1
7#define FALSE 0
8#endif
9
10#define MTRRcap_MSR 0x0fe
11#define MTRRdefType_MSR 0x2ff
12
13#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
14#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
15
16#define NUM_FIXED_RANGES 88
17#define MTRRfix64K_00000_MSR 0x250
18#define MTRRfix16K_80000_MSR 0x258
19#define MTRRfix16K_A0000_MSR 0x259
20#define MTRRfix4K_C0000_MSR 0x268
21#define MTRRfix4K_C8000_MSR 0x269
22#define MTRRfix4K_D0000_MSR 0x26a
23#define MTRRfix4K_D8000_MSR 0x26b
24#define MTRRfix4K_E0000_MSR 0x26c
25#define MTRRfix4K_E8000_MSR 0x26d
26#define MTRRfix4K_F0000_MSR 0x26e
27#define MTRRfix4K_F8000_MSR 0x26f
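/* For reference: the eleven fixed-range MSRs above pack eight ranges
   each - 8 x 64K + 16 x 16K + 64 x 4K = 88 ranges (NUM_FIXED_RANGES),
   covering exactly the first 1MB of physical address space. */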
28
29#define MTRR_CHANGE_MASK_FIXED 0x01
30#define MTRR_CHANGE_MASK_VARIABLE 0x02
31#define MTRR_CHANGE_MASK_DEFTYPE 0x04
32
33/* In the Intel processor's MTRR interface, the MTRR type is always held in
34 an 8 bit field: */
35typedef u8 mtrr_type;
36
37struct mtrr_ops {
38 u32 vendor;
39 u32 use_intel_if;
40// void (*init)(void);
41 void (*set)(unsigned int reg, unsigned long base,
42 unsigned long size, mtrr_type type);
43 void (*set_all)(void);
44
45 void (*get)(unsigned int reg, unsigned long *base,
46 unsigned int *size, mtrr_type * type);
47 int (*get_free_region) (unsigned long base, unsigned long size);
48
49 int (*validate_add_page)(unsigned long base, unsigned long size,
50 unsigned int type);
51 int (*have_wrcomb)(void);
52};
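/*
 * A minimal registration sketch (foo_* are hypothetical; this mirrors
 * what the per-vendor files do at init time):
 *
 *	static struct mtrr_ops foo_mtrr_ops = {
 *		.vendor            = X86_VENDOR_AMD,
 *		.set               = foo_set_mtrr,
 *		.get               = foo_get_mtrr,
 *		.get_free_region   = generic_get_free_region,
 *		.validate_add_page = foo_validate_add_page,
 *		.have_wrcomb       = positive_have_wrcomb,
 *	};
 *
 *	set_mtrr_ops(&foo_mtrr_ops);
 */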
53
54extern int generic_get_free_region(unsigned long base, unsigned long size);
55extern int generic_validate_add_page(unsigned long base, unsigned long size,
56 unsigned int type);
57
58extern struct mtrr_ops generic_mtrr_ops;
59
60extern int positive_have_wrcomb(void);
61
62/* library functions for processor-specific routines */
63struct set_mtrr_context {
64 unsigned long flags;
65 unsigned long deftype_lo;
66 unsigned long deftype_hi;
67 unsigned long cr4val;
68 unsigned long ccr3;
69};
70
71struct mtrr_var_range {
72 unsigned long base_lo;
73 unsigned long base_hi;
74 unsigned long mask_lo;
75 unsigned long mask_hi;
76};
77
78void set_mtrr_done(struct set_mtrr_context *ctxt);
79void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
80void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
81
82void get_mtrr_state(void);
83
84extern void set_mtrr_ops(struct mtrr_ops * ops);
85
86extern u32 size_or_mask, size_and_mask;
87extern struct mtrr_ops * mtrr_if;
88
89#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
90#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
91
92extern unsigned int num_var_ranges;
93
94void finalize_mtrr_state(void);
95void mtrr_state_warn(void);
96char *mtrr_attrib_to_str(int x);
97void mtrr_wrmsr(unsigned, unsigned, unsigned);
98
diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c
new file mode 100644
index 000000000000..f62ecd15811a
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/state.c
@@ -0,0 +1,78 @@
1#include <linux/mm.h>
2#include <linux/init.h>
3#include <asm/io.h>
4#include <asm/mtrr.h>
5#include <asm/msr.h>
6#include "mtrr.h"
7
8
9/* Put the processor into a state where MTRRs can be safely set */
10void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
11{
12 unsigned int cr0;
13
14 /* Disable interrupts locally */
15 local_irq_save(ctxt->flags);
16
17 if (use_intel() || is_cpu(CYRIX)) {
18
19 /* Save value of CR4 and clear Page Global Enable (bit 7) */
20 if ( cpu_has_pge ) {
21 ctxt->cr4val = read_cr4();
22 write_cr4(ctxt->cr4val & (unsigned char) ~(1 << 7));
23 }
24
25 /* Disable and flush caches. Note that wbinvd flushes the TLBs as
26 a side-effect */
27 cr0 = read_cr0() | 0x40000000;
28 wbinvd();
29 write_cr0(cr0);
30 wbinvd();
31
32 if (use_intel())
33 /* Save MTRR state */
34 rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
35 else
36			/* Cyrix ARRs - everything else was excluded at the top */
37 ctxt->ccr3 = getCx86(CX86_CCR3);
38 }
39}
40
41void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
42{
43 if (use_intel())
44 /* Disable MTRRs, and set the default type to uncached */
45 mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
46 ctxt->deftype_hi);
47 else if (is_cpu(CYRIX))
48		/* Cyrix ARRs - everything else was excluded at the top */
49 setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
50}
51
52/* Restore the processor after a set_mtrr_prepare */
53void set_mtrr_done(struct set_mtrr_context *ctxt)
54{
55 if (use_intel() || is_cpu(CYRIX)) {
56
57 /* Flush caches and TLBs */
58 wbinvd();
59
60 /* Restore MTRRdefType */
61 if (use_intel())
62 /* Intel (P6) standard MTRRs */
63 mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
64 else
65 /* Cyrix ARRs - everything else was excluded at the top */
66 setCx86(CX86_CCR3, ctxt->ccr3);
67
68 /* Enable caches */
69 write_cr0(read_cr0() & 0xbfffffff);
70
71 /* Restore value of CR4 */
72 if ( cpu_has_pge )
73 write_cr4(ctxt->cr4val);
74 }
75 /* Re-enable interrupts locally (if enabled previously) */
76 local_irq_restore(ctxt->flags);
77}
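/*
 * A minimal sketch of how a vendor routine is expected to use the
 * helpers above (foo_set_arr() is hypothetical; the contract is the
 * prepare -> cache-disable -> program -> done ordering):
 *
 *	static void foo_set_arr(unsigned int reg, unsigned long base,
 *				unsigned long size, mtrr_type type)
 *	{
 *		struct set_mtrr_context ctxt;
 *
 *		set_mtrr_prepare_save(&ctxt);
 *		set_mtrr_cache_disable(&ctxt);
 *		... program the vendor's range registers here ...
 *		set_mtrr_done(&ctxt);
 *	}
 */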
78