author		Thomas Gleixner <tglx@linutronix.de>	2007-10-11 05:16:28 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2007-10-11 05:16:28 -0400
commit		2ec1df4130c60d1eb49dc0fa0ed15858fede6b05 (patch)
tree		97e578ba1546770eadbe84cff2dc44256f97b9d7 /arch/i386
parent		ee580dc91efd83e6b55955e7261e8ad2a0e08d1a (diff)

i386: move kernel/cpu/mtrr

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/kernel/cpu/Makefile		2
-rw-r--r--	arch/i386/kernel/cpu/mtrr/Makefile	3
-rw-r--r--	arch/i386/kernel/cpu/mtrr/amd.c		121
-rw-r--r--	arch/i386/kernel/cpu/mtrr/centaur.c	224
-rw-r--r--	arch/i386/kernel/cpu/mtrr/cyrix.c	380
-rw-r--r--	arch/i386/kernel/cpu/mtrr/generic.c	509
-rw-r--r--	arch/i386/kernel/cpu/mtrr/if.c		439
-rw-r--r--	arch/i386/kernel/cpu/mtrr/main.c	768
-rw-r--r--	arch/i386/kernel/cpu/mtrr/mtrr.h	98
-rw-r--r--	arch/i386/kernel/cpu/mtrr/state.c	79
10 files changed, 1 insertions, 2622 deletions
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
index 8d9ce0232ada..6687f6d5ad2f 100644
--- a/arch/i386/kernel/cpu/Makefile
+++ b/arch/i386/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ obj-y += umc.o
 
 obj-$(CONFIG_X86_MCE)	+= ../../../x86/kernel/cpu/mcheck/
 
-obj-$(CONFIG_MTRR)	+= mtrr/
+obj-$(CONFIG_MTRR)	+= ../../../x86/kernel/cpu/mtrr/
 obj-$(CONFIG_CPU_FREQ)	+= ../../../x86/kernel/cpu/cpufreq/
 
 obj-$(CONFIG_X86_LOCAL_APIC)	+= perfctr-watchdog.o
diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
deleted file mode 100644
index 191fc0533649..000000000000
--- a/arch/i386/kernel/cpu/mtrr/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y		:= main.o if.o generic.o state.o
-obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
-
diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c
deleted file mode 100644
index 0949cdbf848a..000000000000
--- a/arch/i386/kernel/cpu/mtrr/amd.c
+++ /dev/null
@@ -1,121 +0,0 @@
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-#include "mtrr.h"
-
-static void
-amd_get_mtrr(unsigned int reg, unsigned long *base,
-	     unsigned long *size, mtrr_type * type)
-{
-	unsigned long low, high;
-
-	rdmsr(MSR_K6_UWCCR, low, high);
-	/* Upper dword is region 1, lower is region 0 */
-	if (reg == 1)
-		low = high;
-	/* The base masks off on the right alignment */
-	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
-	*type = 0;
-	if (low & 1)
-		*type = MTRR_TYPE_UNCACHABLE;
-	if (low & 2)
-		*type = MTRR_TYPE_WRCOMB;
-	if (!(low & 3)) {
-		*size = 0;
-		return;
-	}
-	/*
-	 * This needs a little explaining. The size is stored as an
-	 * inverted mask of bits of 128K granularity 15 bits long offset
-	 * 2 bits
-	 *
-	 * So to get a size we do invert the mask and add 1 to the lowest
-	 * mask bit (4 as its 2 bits in). This gives us a size we then shift
-	 * to turn into 128K blocks
-	 *
-	 * eg	111 1111 1111 1100	is 512K
-	 *
-	 * invert	000 0000 0000 0011
-	 * +1		000 0000 0000 0100
-	 * *128K	...
-	 */
-	low = (~low) & 0x1FFFC;
-	*size = (low + 4) << (15 - PAGE_SHIFT);
-	return;
-}
-
-static void amd_set_mtrr(unsigned int reg, unsigned long base,
-			 unsigned long size, mtrr_type type)
-/*  [SUMMARY] Set variable MTRR register on the local CPU.
-    <reg> The register to set.
-    <base> The base address of the region.
-    <size> The size of the region. If this is 0 the region is disabled.
-    <type> The type of the region.
-    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
-    be done externally.
-    [RETURNS] Nothing.
-*/
-{
-	u32 regs[2];
-
-	/*
-	 * Low is MTRR0, High MTRR 1
-	 */
-	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
-	/*
-	 * Blank to disable
-	 */
-	if (size == 0)
-		regs[reg] = 0;
-	else
-		/* Set the register to the base, the type (off by one) and an
-		   inverted bitmask of the size The size is the only odd
-		   bit. We are fed say 512K We invert this and we get 111 1111
-		   1111 1011 but if you subtract one and invert you get the
-		   desired 111 1111 1111 1100 mask
-
-		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
-		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
-		    | (base << PAGE_SHIFT) | (type + 1);
-
-	/*
-	 * The writeback rule is quite specific. See the manual. Its
-	 * disable local interrupts, write back the cache, set the mtrr
-	 */
-	wbinvd();
-	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
-}
-
-static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
-{
-	/* Apply the K6 block alignment and size rules
-	   In order
-	   o Uncached or gathering only
-	   o 128K or bigger block
-	   o Power of 2 block
-	   o base suitably aligned to the power
-	 */
-	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
-	    || (size & ~(size - 1)) - size || (base & (size - 1)))
-		return -EINVAL;
-	return 0;
-}
-
-static struct mtrr_ops amd_mtrr_ops = {
-	.vendor            = X86_VENDOR_AMD,
-	.set               = amd_set_mtrr,
-	.get               = amd_get_mtrr,
-	.get_free_region   = generic_get_free_region,
-	.validate_add_page = amd_validate_add_page,
-	.have_wrcomb       = positive_have_wrcomb,
-};
-
-int __init amd_init_mtrr(void)
-{
-	set_mtrr_ops(&amd_mtrr_ops);
-	return 0;
-}
-
-//arch_initcall(amd_mtrr_init);
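
A note on the UWCCR size arithmetic deleted above: the following standalone
sketch (not kernel code; it assumes PAGE_SHIFT == 12, i.e. 4K pages)
round-trips the inverted 128K-granularity mask that amd_get_mtrr() and
amd_set_mtrr() manipulate.

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Encode a size in pages as amd_set_mtrr() does (base/type bits
     * omitted); -size is the two's-complement shortcut for ~(size - 1). */
    static unsigned long k6_encode(unsigned long size_pages)
    {
        return (-size_pages >> (15 - PAGE_SHIFT)) & 0x0001FFFC;
    }

    /* Decode the mask back into a page count, as amd_get_mtrr() does. */
    static unsigned long k6_decode(unsigned long low)
    {
        low = (~low) & 0x1FFFC;
        return (low + 4) << (15 - PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long pages = (512 * 1024) >> PAGE_SHIFT;  /* 512K */
        unsigned long mask = k6_encode(pages);

        printf("mask = %#lx\n", mask);  /* 0x1fff0: 111...1100, offset 2 */
        assert(k6_decode(mask) == pages);
        return 0;
    }
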
diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c
deleted file mode 100644
index cb9aa3a7a7ab..000000000000
--- a/arch/i386/kernel/cpu/mtrr/centaur.c
+++ /dev/null
@@ -1,224 +0,0 @@
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include "mtrr.h"
-
-static struct {
-	unsigned long high;
-	unsigned long low;
-} centaur_mcr[8];
-
-static u8 centaur_mcr_reserved;
-static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */
-
-/*
- * Report boot time MCR setups
- */
-
-static int
-centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/*  [SUMMARY] Get a free MTRR.
-    <base> The starting (base) address of the region.
-    <size> The size (in bytes) of the region.
-    [RETURNS] The index of the region on success, else -1 on error.
-*/
-{
-	int i, max;
-	mtrr_type ltype;
-	unsigned long lbase, lsize;
-
-	max = num_var_ranges;
-	if (replace_reg >= 0 && replace_reg < max)
-		return replace_reg;
-	for (i = 0; i < max; ++i) {
-		if (centaur_mcr_reserved & (1 << i))
-			continue;
-		mtrr_if->get(i, &lbase, &lsize, &ltype);
-		if (lsize == 0)
-			return i;
-	}
-	return -ENOSPC;
-}
-
-void
-mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
-{
-	centaur_mcr[mcr].low = lo;
-	centaur_mcr[mcr].high = hi;
-}
-
-static void
-centaur_get_mcr(unsigned int reg, unsigned long *base,
-		unsigned long *size, mtrr_type * type)
-{
-	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
-	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
-	*type = MTRR_TYPE_WRCOMB;	/* If it is there, it is write-combining */
-	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
-		*type = MTRR_TYPE_UNCACHABLE;
-	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
-		*type = MTRR_TYPE_WRBACK;
-	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
-		*type = MTRR_TYPE_WRBACK;
-
-}
-
-static void centaur_set_mcr(unsigned int reg, unsigned long base,
-			    unsigned long size, mtrr_type type)
-{
-	unsigned long low, high;
-
-	if (size == 0) {
-		/* Disable */
-		high = low = 0;
-	} else {
-		high = base << PAGE_SHIFT;
-		if (centaur_mcr_type == 0)
-			low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
-		else {
-			if (type == MTRR_TYPE_UNCACHABLE)
-				low = -size << PAGE_SHIFT | 0x02;	/* NC */
-			else
-				low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
-		}
-	}
-	centaur_mcr[reg].high = high;
-	centaur_mcr[reg].low = low;
-	wrmsr(MSR_IDT_MCR0 + reg, low, high);
-}
-
-#if 0
-/*
- * Initialise the later (saner) Winchip MCR variant. In this version
- * the BIOS can pass us the registers it has used (but not their values)
- * and the control register is read/write
- */
-
-static void __init
-centaur_mcr1_init(void)
-{
-	unsigned i;
-	u32 lo, hi;
-
-	/* Unfortunately, MCR's are read-only, so there is no way to
-	 * find out what the bios might have done.
-	 */
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	if (((lo >> 17) & 7) == 1) {	/* Type 1 Winchip2 MCR */
-		lo &= ~0x1C0;	/* clear key */
-		lo |= 0x040;	/* set key to 1 */
-		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
-	}
-
-	centaur_mcr_type = 1;
-
-	/*
-	 * Clear any unconfigured MCR's.
-	 */
-
-	for (i = 0; i < 8; ++i) {
-		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
-			if (!(lo & (1 << (9 + i))))
-				wrmsr(MSR_IDT_MCR0 + i, 0, 0);
-			else
-				/*
-				 * If the BIOS set up an MCR we cannot see it
-				 * but we don't wish to obliterate it
-				 */
-				centaur_mcr_reserved |= (1 << i);
-		}
-	}
-	/*
-	 * Throw the main write-combining switch...
-	 * However if OOSTORE is enabled then people have already done far
-	 * cleverer things and we should behave.
-	 */
-
-	lo |= 15;	/* Write combine enables */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-/*
- * Initialise the original winchip with read only MCR registers
- * no used bitmask for the BIOS to pass on and write only control
- */
-
-static void __init
-centaur_mcr0_init(void)
-{
-	unsigned i;
-
-	/* Unfortunately, MCR's are read-only, so there is no way to
-	 * find out what the bios might have done.
-	 */
-
-	/* Clear any unconfigured MCR's.
-	 * This way we are sure that the centaur_mcr array contains the actual
-	 * values. The disadvantage is that any BIOS tweaks are thus undone.
-	 *
-	 */
-	for (i = 0; i < 8; ++i) {
-		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
-			wrmsr(MSR_IDT_MCR0 + i, 0, 0);
-	}
-
-	wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
-}
-
-/*
- * Initialise Winchip series MCR registers
- */
-
-static void __init
-centaur_mcr_init(void)
-{
-	struct set_mtrr_context ctxt;
-
-	set_mtrr_prepare_save(&ctxt);
-	set_mtrr_cache_disable(&ctxt);
-
-	if (boot_cpu_data.x86_model == 4)
-		centaur_mcr0_init();
-	else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
-		centaur_mcr1_init();
-
-	set_mtrr_done(&ctxt);
-}
-#endif
-
-static int centaur_validate_add_page(unsigned long base,
-				     unsigned long size, unsigned int type)
-{
-	/*
-	 * FIXME: Winchip2 supports uncached
-	 */
-	if (type != MTRR_TYPE_WRCOMB &&
-	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
-		printk(KERN_WARNING
-		       "mtrr: only write-combining%s supported\n",
-		       centaur_mcr_type ? " and uncacheable are"
-					: " is");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static struct mtrr_ops centaur_mtrr_ops = {
-	.vendor            = X86_VENDOR_CENTAUR,
-//	.init              = centaur_mcr_init,
-	.set               = centaur_set_mcr,
-	.get               = centaur_get_mcr,
-	.get_free_region   = centaur_get_free_region,
-	.validate_add_page = centaur_validate_add_page,
-	.have_wrcomb       = positive_have_wrcomb,
-};
-
-int __init centaur_init_mtrr(void)
-{
-	set_mtrr_ops(&centaur_mtrr_ops);
-	return 0;
-}
-
-//arch_initcall(centaur_init_mtrr);
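
The MCR encoding in the deleted centaur.c relies on the same negative-size
trick; a minimal sketch (assuming PAGE_SHIFT == 12 and the 32-bit arithmetic
of the original, hence uint32_t; the region size is made up):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t size_pages = 0x2000;  /* a 32MB region in 4K pages */
        /* centaur_set_mcr() for a WinChip2 WC range: mask | "WWO,WC" */
        uint32_t low = (uint32_t)(-size_pages << PAGE_SHIFT) | 0x09;

        /* centaur_get_mcr() recovers the size by negating the mask bits */
        assert((uint32_t)-(low & 0xfffff000) >> PAGE_SHIFT == size_pages);
        return 0;
    }
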
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
deleted file mode 100644
index 2287d4863a8a..000000000000
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ /dev/null
@@ -1,380 +0,0 @@
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/processor-cyrix.h>
-#include "mtrr.h"
-
-int arr3_protected;
-
-static void
-cyrix_get_arr(unsigned int reg, unsigned long *base,
-	      unsigned long *size, mtrr_type * type)
-{
-	unsigned long flags;
-	unsigned char arr, ccr3, rcr, shift;
-
-	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
-
-	/* Save flags and disable interrupts */
-	local_irq_save(flags);
-
-	ccr3 = getCx86(CX86_CCR3);
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
-	((unsigned char *) base)[3] = getCx86(arr);
-	((unsigned char *) base)[2] = getCx86(arr + 1);
-	((unsigned char *) base)[1] = getCx86(arr + 2);
-	rcr = getCx86(CX86_RCR_BASE + reg);
-	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
-
-	/* Enable interrupts if it was enabled previously */
-	local_irq_restore(flags);
-	shift = ((unsigned char *) base)[1] & 0x0f;
-	*base >>= PAGE_SHIFT;
-
-	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
-	 * Note: shift==0xf means 4G, this is unsupported.
-	 */
-	if (shift)
-		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
-	else
-		*size = 0;
-
-	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
-	if (reg < 7) {
-		switch (rcr) {
-		case 1:
-			*type = MTRR_TYPE_UNCACHABLE;
-			break;
-		case 8:
-			*type = MTRR_TYPE_WRBACK;
-			break;
-		case 9:
-			*type = MTRR_TYPE_WRCOMB;
-			break;
-		case 24:
-		default:
-			*type = MTRR_TYPE_WRTHROUGH;
-			break;
-		}
-	} else {
-		switch (rcr) {
-		case 0:
-			*type = MTRR_TYPE_UNCACHABLE;
-			break;
-		case 8:
-			*type = MTRR_TYPE_WRCOMB;
-			break;
-		case 9:
-			*type = MTRR_TYPE_WRBACK;
-			break;
-		case 25:
-		default:
-			*type = MTRR_TYPE_WRTHROUGH;
-			break;
-		}
-	}
-}
-
-static int
-cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/*  [SUMMARY] Get a free ARR.
-    <base> The starting (base) address of the region.
-    <size> The size (in bytes) of the region.
-    [RETURNS] The index of the region on success, else -1 on error.
-*/
-{
-	int i;
-	mtrr_type ltype;
-	unsigned long lbase, lsize;
-
-	switch (replace_reg) {
-	case 7:
-		if (size < 0x40)
-			break;
-	case 6:
-	case 5:
-	case 4:
-		return replace_reg;
-	case 3:
-		if (arr3_protected)
-			break;
-	case 2:
-	case 1:
-	case 0:
-		return replace_reg;
-	}
-	/* If we are to set up a region >32M then look at ARR7 immediately */
-	if (size > 0x2000) {
-		cyrix_get_arr(7, &lbase, &lsize, &ltype);
-		if (lsize == 0)
-			return 7;
-		/* Else try ARR0-ARR6 first */
-	} else {
-		for (i = 0; i < 7; i++) {
-			cyrix_get_arr(i, &lbase, &lsize, &ltype);
-			if ((i == 3) && arr3_protected)
-				continue;
-			if (lsize == 0)
-				return i;
-		}
-		/* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
-		cyrix_get_arr(i, &lbase, &lsize, &ltype);
-		if ((lsize == 0) && (size >= 0x40))
-			return i;
-	}
-	return -ENOSPC;
-}
-
-static u32 cr4 = 0;
-static u32 ccr3;
-
-static void prepare_set(void)
-{
-	u32 cr0;
-
-	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-	if ( cpu_has_pge ) {
-		cr4 = read_cr4();
-		write_cr4(cr4 & ~X86_CR4_PGE);
-	}
-
-	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
-	    a side-effect  */
-	cr0 = read_cr0() | 0x40000000;
-	wbinvd();
-	write_cr0(cr0);
-	wbinvd();
-
-	/* Cyrix ARRs - everything else were excluded at the top */
-	ccr3 = getCx86(CX86_CCR3);
-
-	/* Cyrix ARRs - everything else were excluded at the top */
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
-
-}
-
-static void post_set(void)
-{
-	/*  Flush caches and TLBs  */
-	wbinvd();
-
-	/* Cyrix ARRs - everything else was excluded at the top */
-	setCx86(CX86_CCR3, ccr3);
-
-	/*  Enable caches  */
-	write_cr0(read_cr0() & 0xbfffffff);
-
-	/*  Restore value of CR4  */
-	if ( cpu_has_pge )
-		write_cr4(cr4);
-}
-
-static void cyrix_set_arr(unsigned int reg, unsigned long base,
-			  unsigned long size, mtrr_type type)
-{
-	unsigned char arr, arr_type, arr_size;
-
-	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
-
-	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
-	if (reg >= 7)
-		size >>= 6;
-
-	size &= 0x7fff;		/* make sure arr_size <= 14 */
-	for (arr_size = 0; size; arr_size++, size >>= 1) ;
-
-	if (reg < 7) {
-		switch (type) {
-		case MTRR_TYPE_UNCACHABLE:
-			arr_type = 1;
-			break;
-		case MTRR_TYPE_WRCOMB:
-			arr_type = 9;
-			break;
-		case MTRR_TYPE_WRTHROUGH:
-			arr_type = 24;
-			break;
-		default:
-			arr_type = 8;
-			break;
-		}
-	} else {
-		switch (type) {
-		case MTRR_TYPE_UNCACHABLE:
-			arr_type = 0;
-			break;
-		case MTRR_TYPE_WRCOMB:
-			arr_type = 8;
-			break;
-		case MTRR_TYPE_WRTHROUGH:
-			arr_type = 25;
-			break;
-		default:
-			arr_type = 9;
-			break;
-		}
-	}
-
-	prepare_set();
-
-	base <<= PAGE_SHIFT;
-	setCx86(arr, ((unsigned char *) &base)[3]);
-	setCx86(arr + 1, ((unsigned char *) &base)[2]);
-	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
-	setCx86(CX86_RCR_BASE + reg, arr_type);
-
-	post_set();
-}
-
-typedef struct {
-	unsigned long base;
-	unsigned long size;
-	mtrr_type type;
-} arr_state_t;
-
-static arr_state_t arr_state[8] = {
-	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
-	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
-};
-
-static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
-
-static void cyrix_set_all(void)
-{
-	int i;
-
-	prepare_set();
-
-	/* the CCRs are not contiguous */
-	for (i = 0; i < 4; i++)
-		setCx86(CX86_CCR0 + i, ccr_state[i]);
-	for (; i < 7; i++)
-		setCx86(CX86_CCR4 + i, ccr_state[i]);
-	for (i = 0; i < 8; i++)
-		cyrix_set_arr(i, arr_state[i].base,
-			      arr_state[i].size, arr_state[i].type);
-
-	post_set();
-}
-
-#if 0
-/*
- * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
- * with the SMM (System Management Mode) mode. So we need the following:
- * Check whether SMI_LOCK (CCR3 bit 0) is set
- *   if it is set, write a warning message: ARR3 cannot be changed!
- *     (it cannot be changed until the next processor reset)
- *   if it is reset, then we can change it, set all the needed bits:
- *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
- *   - disable access to SMM memory (CCR1 bit 2 reset)
- *   - disable SMM mode (CCR1 bit 1 reset)
- *   - disable write protection of ARR3 (CCR6 bit 1 reset)
- *   - (maybe) disable ARR3
- * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
- */
-static void __init
-cyrix_arr_init(void)
-{
-	struct set_mtrr_context ctxt;
-	unsigned char ccr[7];
-	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
-#ifdef CONFIG_SMP
-	int i;
-#endif
-
-	/* flush cache and enable MAPEN */
-	set_mtrr_prepare_save(&ctxt);
-	set_mtrr_cache_disable(&ctxt);
-
-	/* Save all CCRs locally */
-	ccr[0] = getCx86(CX86_CCR0);
-	ccr[1] = getCx86(CX86_CCR1);
-	ccr[2] = getCx86(CX86_CCR2);
-	ccr[3] = ctxt.ccr3;
-	ccr[4] = getCx86(CX86_CCR4);
-	ccr[5] = getCx86(CX86_CCR5);
-	ccr[6] = getCx86(CX86_CCR6);
-
-	if (ccr[3] & 1) {
-		ccrc[3] = 1;
-		arr3_protected = 1;
-	} else {
-		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
-		 * access to SMM memory through ARR3 (bit 7).
-		 */
-		if (ccr[1] & 0x80) {
-			ccr[1] &= 0x7f;
-			ccrc[1] |= 0x80;
-		}
-		if (ccr[1] & 0x04) {
-			ccr[1] &= 0xfb;
-			ccrc[1] |= 0x04;
-		}
-		if (ccr[1] & 0x02) {
-			ccr[1] &= 0xfd;
-			ccrc[1] |= 0x02;
-		}
-		arr3_protected = 0;
-		if (ccr[6] & 0x02) {
-			ccr[6] &= 0xfd;
-			ccrc[6] = 1;	/* Disable write protection of ARR3 */
-			setCx86(CX86_CCR6, ccr[6]);
-		}
-		/* Disable ARR3. This is safe now that we disabled SMM. */
-		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
-	}
-	/* If we changed CCR1 in memory, change it in the processor, too. */
-	if (ccrc[1])
-		setCx86(CX86_CCR1, ccr[1]);
-
-	/* Enable ARR usage by the processor */
-	if (!(ccr[5] & 0x20)) {
-		ccr[5] |= 0x20;
-		ccrc[5] = 1;
-		setCx86(CX86_CCR5, ccr[5]);
-	}
-#ifdef CONFIG_SMP
-	for (i = 0; i < 7; i++)
-		ccr_state[i] = ccr[i];
-	for (i = 0; i < 8; i++)
-		cyrix_get_arr(i,
-			      &arr_state[i].base, &arr_state[i].size,
-			      &arr_state[i].type);
-#endif
-
-	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */
-
-	if (ccrc[5])
-		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
-	if (ccrc[3])
-		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
-/*
-	if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
-	if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
-	if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
-*/
-	if (ccrc[6])
-		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
-}
-#endif
-
-static struct mtrr_ops cyrix_mtrr_ops = {
-	.vendor            = X86_VENDOR_CYRIX,
-//	.init              = cyrix_arr_init,
-	.set_all	   = cyrix_set_all,
-	.set               = cyrix_set_arr,
-	.get               = cyrix_get_arr,
-	.get_free_region   = cyrix_get_free_region,
-	.validate_add_page = generic_validate_add_page,
-	.have_wrcomb       = positive_have_wrcomb,
-};
-
-int __init cyrix_init_mtrr(void)
-{
-	set_mtrr_ops(&cyrix_mtrr_ops);
-	return 0;
-}
-
-//arch_initcall(cyrix_init_mtrr);
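
The ARR size nibble used by the deleted cyrix.c can be exercised standalone;
a sketch (assuming 4K pages; arr_size_nibble() and arr_pages() are
illustrative names mirroring cyrix_set_arr() and cyrix_get_arr()):

    #include <assert.h>

    /* cyrix_set_arr(): reduce a page count to the 4-bit size exponent */
    static unsigned char arr_size_nibble(unsigned int reg, unsigned long size)
    {
        unsigned char arr_size;

        if (reg >= 7)
            size >>= 6;        /* ARR7 counts in 256K units, not 4K */
        size &= 0x7fff;        /* make sure arr_size <= 14 */
        for (arr_size = 0; size; arr_size++, size >>= 1)
            ;
        return arr_size;
    }

    /* cyrix_get_arr(): expand the nibble back to pages (0 means disabled) */
    static unsigned long arr_pages(unsigned int reg, unsigned char shift)
    {
        if (!shift)
            return 0;
        return (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
    }

    int main(void)
    {
        unsigned long pages = 0x400;   /* 4MB in 4K pages */

        assert(arr_pages(3, arr_size_nibble(3, pages)) == pages);
        assert(arr_pages(7, arr_size_nibble(7, pages)) == pages);
        return 0;
    }
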
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
deleted file mode 100644
index 56f64e34829f..000000000000
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
-   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/system.h>
-#include <asm/cpufeature.h>
-#include <asm/tlbflush.h>
-#include "mtrr.h"
-
-struct mtrr_state {
-	struct mtrr_var_range *var_ranges;
-	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
-	unsigned char enabled;
-	unsigned char have_fixed;
-	mtrr_type def_type;
-};
-
-struct fixed_range_block {
-	int base_msr;	/* start address of an MTRR block */
-	int ranges;	/* number of MTRRs in this block */
-};
-
-static struct fixed_range_block fixed_range_blocks[] = {
-	{ MTRRfix64K_00000_MSR, 1 },	/* one   64k MTRR  */
-	{ MTRRfix16K_80000_MSR, 2 },	/* two   16k MTRRs */
-	{ MTRRfix4K_C0000_MSR,  8 },	/* eight  4k MTRRs */
-	{}
-};
-
-static unsigned long smp_changes_mask;
-static struct mtrr_state mtrr_state = {};
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
-
-/*  Get the MSR pair relating to a var range  */
-static void
-get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
-{
-	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
-	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
-}
-
-static void
-get_fixed_ranges(mtrr_type * frs)
-{
-	unsigned int *p = (unsigned int *) frs;
-	int i;
-
-	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
-
-	for (i = 0; i < 2; i++)
-		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
-	for (i = 0; i < 8; i++)
-		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
-}
-
-void mtrr_save_fixed_ranges(void *info)
-{
-	if (cpu_has_mtrr)
-		get_fixed_ranges(mtrr_state.fixed_ranges);
-}
-
-static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
-{
-	unsigned i;
-
-	for (i = 0; i < 8; ++i, ++types, base += step)
-		printk(KERN_INFO "MTRR %05X-%05X %s\n",
-			base, base + step - 1, mtrr_attrib_to_str(*types));
-}
-
-/*  Grab all of the MTRR state for this CPU into *state  */
-void __init get_mtrr_state(void)
-{
-	unsigned int i;
-	struct mtrr_var_range *vrs;
-	unsigned lo, dummy;
-
-	if (!mtrr_state.var_ranges) {
-		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
-						GFP_KERNEL);
-		if (!mtrr_state.var_ranges)
-			return;
-	}
-	vrs = mtrr_state.var_ranges;
-
-	rdmsr(MTRRcap_MSR, lo, dummy);
-	mtrr_state.have_fixed = (lo >> 8) & 1;
-
-	for (i = 0; i < num_var_ranges; i++)
-		get_mtrr_var_range(i, &vrs[i]);
-	if (mtrr_state.have_fixed)
-		get_fixed_ranges(mtrr_state.fixed_ranges);
-
-	rdmsr(MTRRdefType_MSR, lo, dummy);
-	mtrr_state.def_type = (lo & 0xff);
-	mtrr_state.enabled = (lo & 0xc00) >> 10;
-
-	if (mtrr_show) {
-		int high_width;
-
-		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
-		if (mtrr_state.have_fixed) {
-			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
-			       mtrr_state.enabled & 1 ? "en" : "dis");
-			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
-			for (i = 0; i < 2; ++i)
-				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
-			for (i = 0; i < 8; ++i)
-				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
-		}
-		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
-		       mtrr_state.enabled & 2 ? "en" : "dis");
-		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
-		for (i = 0; i < num_var_ranges; ++i) {
-			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
-				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
-				       i,
-				       high_width,
-				       mtrr_state.var_ranges[i].base_hi,
-				       mtrr_state.var_ranges[i].base_lo >> 12,
-				       high_width,
-				       mtrr_state.var_ranges[i].mask_hi,
-				       mtrr_state.var_ranges[i].mask_lo >> 12,
-				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
-			else
-				printk(KERN_INFO "MTRR %u disabled\n", i);
-		}
-	}
-}
-
-/* Some BIOSes are buggy and don't set all MTRRs the same! */
-void __init mtrr_state_warn(void)
-{
-	unsigned long mask = smp_changes_mask;
-
-	if (!mask)
-		return;
-	if (mask & MTRR_CHANGE_MASK_FIXED)
-		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
-	if (mask & MTRR_CHANGE_MASK_VARIABLE)
-		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
-	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
-		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
-	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
-	printk(KERN_INFO "mtrr: corrected configuration.\n");
-}
-
-/* Doesn't attempt to pass an error out to MTRR users
-   because it's quite complicated in some cases and probably not
-   worth it because the best error handling is to ignore it. */
-void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
-{
-	if (wrmsr_safe(msr, a, b) < 0)
-		printk(KERN_ERR
-			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
-			smp_processor_id(), msr, a, b);
-}
-
-/**
- * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
- * see AMD publication no. 24593, chapter 3.2.1 for more information
- */
-static inline void k8_enable_fixed_iorrs(void)
-{
-	unsigned lo, hi;
-
-	rdmsr(MSR_K8_SYSCFG, lo, hi);
-	mtrr_wrmsr(MSR_K8_SYSCFG, lo
-			| K8_MTRRFIXRANGE_DRAM_ENABLE
-			| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
-}
-
-/**
- * Checks and updates a fixed-range MTRR if it differs from the value it
- * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also.
- * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
- * \param msr		MSR address of the MTRR which should be checked and updated
- * \param changed	pointer which indicates whether the MTRR needed to be changed
- * \param msrwords	pointer to the MSR values which the MSR should have
- */
-static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
-{
-	unsigned lo, hi;
-
-	rdmsr(msr, lo, hi);
-
-	if (lo != msrwords[0] || hi != msrwords[1]) {
-		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-		    boot_cpu_data.x86 == 15 &&
-		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
-			k8_enable_fixed_iorrs();
-		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
-		*changed = TRUE;
-	}
-}
-
-int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/*  [SUMMARY] Get a free MTRR.
-    <base> The starting (base) address of the region.
-    <size> The size (in bytes) of the region.
-    [RETURNS] The index of the region on success, else -1 on error.
-*/
-{
-	int i, max;
-	mtrr_type ltype;
-	unsigned long lbase, lsize;
-
-	max = num_var_ranges;
-	if (replace_reg >= 0 && replace_reg < max)
-		return replace_reg;
-	for (i = 0; i < max; ++i) {
-		mtrr_if->get(i, &lbase, &lsize, &ltype);
-		if (lsize == 0)
-			return i;
-	}
-	return -ENOSPC;
-}
-
-static void generic_get_mtrr(unsigned int reg, unsigned long *base,
-			     unsigned long *size, mtrr_type *type)
-{
-	unsigned int mask_lo, mask_hi, base_lo, base_hi;
-
-	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
-	if ((mask_lo & 0x800) == 0) {
-		/*  Invalid (i.e. free) range  */
-		*base = 0;
-		*size = 0;
-		*type = 0;
-		return;
-	}
-
-	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
-
-	/* Work out the shifted address mask. */
-	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
-	    | mask_lo >> PAGE_SHIFT;
-
-	/* This works correctly if size is a power of two, i.e. a
-	   contiguous range. */
-	*size = -mask_lo;
-	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
-	*type = base_lo & 0xff;
-}
-
-/**
- * Checks and updates the fixed-range MTRRs if they differ from the saved set
- * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
- */
-static int set_fixed_ranges(mtrr_type * frs)
-{
-	unsigned long long *saved = (unsigned long long *) frs;
-	int changed = FALSE;
-	int block=-1, range;
-
-	while (fixed_range_blocks[++block].ranges)
-	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
-		set_fixed_range(fixed_range_blocks[block].base_msr + range,
-		    &changed, (unsigned int *) saved++);
-
-	return changed;
-}
-
-/*  Set the MSR pair relating to a var range. Returns TRUE if
-    changes are made  */
-static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
-{
-	unsigned int lo, hi;
-	int changed = FALSE;
-
-	rdmsr(MTRRphysBase_MSR(index), lo, hi);
-	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
-	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
-		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
-		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
-		changed = TRUE;
-	}
-
-	rdmsr(MTRRphysMask_MSR(index), lo, hi);
-
-	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
-	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
-		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
-		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
-		changed = TRUE;
-	}
-	return changed;
-}
-
-static u32 deftype_lo, deftype_hi;
-
-static unsigned long set_mtrr_state(void)
-/*  [SUMMARY] Set the MTRR state for this CPU.
-    <state> The MTRR state information to read.
-    <ctxt> Some relevant CPU context.
-    [NOTE] The CPU must already be in a safe state for MTRR changes.
-    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
-*/
-{
-	unsigned int i;
-	unsigned long change_mask = 0;
-
-	for (i = 0; i < num_var_ranges; i++)
-		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
-			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
-
-	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
-		change_mask |= MTRR_CHANGE_MASK_FIXED;
-
-	/*  Set_mtrr_restore restores the old value of MTRRdefType,
-	    so to set it we fiddle with the saved value  */
-	if ((deftype_lo & 0xff) != mtrr_state.def_type
-	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
-		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
-		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
-	}
-
-	return change_mask;
-}
-
-
-static unsigned long cr4 = 0;
-static DEFINE_SPINLOCK(set_atomicity_lock);
-
-/*
- * Since we are disabling the cache don't allow any interrupts - they
- * would run extremely slow and would only increase the pain.  The caller must
- * ensure that local interrupts are disabled and are reenabled after post_set()
- * has been called.
- */
-
-static void prepare_set(void) __acquires(set_atomicity_lock)
-{
-	unsigned long cr0;
-
-	/*  Note that this is not ideal, since the cache is only flushed/disabled
-	    for this CPU while the MTRRs are changed, but changing this requires
-	    more invasive changes to the way the kernel boots  */
-
-	spin_lock(&set_atomicity_lock);
-
-	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
-	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
-	write_cr0(cr0);
-	wbinvd();
-
-	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-	if ( cpu_has_pge ) {
-		cr4 = read_cr4();
-		write_cr4(cr4 & ~X86_CR4_PGE);
-	}
-
-	/* Flush all TLBs via a mov %cr3, %reg;  mov %reg, %cr3 */
-	__flush_tlb();
-
-	/*  Save MTRR state */
-	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
-
-	/*  Disable MTRRs, and set the default type to uncached  */
-	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
-}
-
-static void post_set(void) __releases(set_atomicity_lock)
-{
-	/*  Flush TLBs (no need to flush caches - they are disabled)  */
-	__flush_tlb();
-
-	/* Intel (P6) standard MTRRs */
-	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
-
-	/*  Enable caches  */
-	write_cr0(read_cr0() & 0xbfffffff);
-
-	/*  Restore value of CR4  */
-	if ( cpu_has_pge )
-		write_cr4(cr4);
-	spin_unlock(&set_atomicity_lock);
-}
-
-static void generic_set_all(void)
-{
-	unsigned long mask, count;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	prepare_set();
-
-	/* Actually set the state */
-	mask = set_mtrr_state();
-
-	post_set();
-	local_irq_restore(flags);
-
-	/*  Use the atomic bitops to update the global mask  */
-	for (count = 0; count < sizeof mask * 8; ++count) {
-		if (mask & 0x01)
-			set_bit(count, &smp_changes_mask);
-		mask >>= 1;
-	}
-
-}
-
-static void generic_set_mtrr(unsigned int reg, unsigned long base,
-			     unsigned long size, mtrr_type type)
-/*  [SUMMARY] Set variable MTRR register on the local CPU.
-    <reg> The register to set.
-    <base> The base address of the region.
-    <size> The size of the region. If this is 0 the region is disabled.
-    <type> The type of the region.
-    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
-    be done externally.
-    [RETURNS] Nothing.
-*/
-{
-	unsigned long flags;
-	struct mtrr_var_range *vr;
-
-	vr = &mtrr_state.var_ranges[reg];
-
-	local_irq_save(flags);
-	prepare_set();
-
-	if (size == 0) {
-		/* The invalid bit is kept in the mask, so we simply clear the
-		   relevant mask register to disable a range. */
-		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
-		memset(vr, 0, sizeof(struct mtrr_var_range));
-	} else {
-		vr->base_lo = base << PAGE_SHIFT | type;
-		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
-		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
-		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
-
-		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
-		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
-	}
-
-	post_set();
-	local_irq_restore(flags);
-}
-
-int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
-{
-	unsigned long lbase, last;
-
-	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
-	    and not touch 0x70000000->0x7003FFFF */
-	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
-	    boot_cpu_data.x86_model == 1 &&
-	    boot_cpu_data.x86_mask <= 7) {
-		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
-			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
-			return -EINVAL;
-		}
-		if (!(base + size < 0x70000 || base > 0x7003F) &&
-		    (type == MTRR_TYPE_WRCOMB
-		     || type == MTRR_TYPE_WRBACK)) {
-			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
-			return -EINVAL;
-		}
-	}
-
-	/*  Check upper bits of base and last are equal and lower bits are 0
-	    for base and 1 for last  */
-	last = base + size - 1;
-	for (lbase = base; !(lbase & 1) && (last & 1);
-	     lbase = lbase >> 1, last = last >> 1) ;
-	if (lbase != last) {
-		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
-		       base, size);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-
-static int generic_have_wrcomb(void)
-{
-	unsigned long config, dummy;
-	rdmsr(MTRRcap_MSR, config, dummy);
-	return (config & (1 << 10));
-}
-
-int positive_have_wrcomb(void)
-{
-	return 1;
-}
-
-/* generic structure...
- */
-struct mtrr_ops generic_mtrr_ops = {
-	.use_intel_if      = 1,
-	.set_all	   = generic_set_all,
-	.get               = generic_get_mtrr,
-	.get_free_region   = generic_get_free_region,
-	.set               = generic_set_mtrr,
-	.validate_add_page = generic_validate_add_page,
-	.have_wrcomb       = generic_have_wrcomb,
-};
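
For the mask-to-size step in the deleted generic_get_mtrr(), a standalone
sketch (assumptions: PAGE_SHIFT == 12 and a 36-bit physical address width,
so size_or_mask would be 0xff000000; the register values are made up):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT   12
    #define SIZE_OR_MASK 0xff000000u  /* bits beyond the 36-bit phys limit */

    int main(void)
    {
        /* A hypothetical MTRRphysMask of 0xf.f8000800 (valid bit 11 set)
         * describing a 128MB power-of-two range. */
        uint32_t mask_hi = 0xf, mask_lo = 0xf8000800;

        /* Fold both halves into one page-granular mask, then negate:
         * for a contiguous range, -mask is the size in pages. */
        uint32_t mask = SIZE_OR_MASK | mask_hi << (32 - PAGE_SHIFT)
                        | mask_lo >> PAGE_SHIFT;       /* 0xffff8000 */
        uint32_t size_pages = -mask;                   /* 0x8000 */

        assert((uint64_t)size_pages << PAGE_SHIFT == 128u << 20);
        return 0;
    }
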
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
deleted file mode 100644
index c7d8f1756745..000000000000
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ /dev/null
@@ -1,439 +0,0 @@
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/capability.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-
-#define LINE_SIZE 80
-
-#include <asm/mtrr.h>
-#include "mtrr.h"
-
-/* RED-PEN: this is accessed without any locking */
-extern unsigned int *usage_table;
-
-
-#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
-
-static const char *const mtrr_strings[MTRR_NUM_TYPES] =
-{
-	"uncachable",		/* 0 */
-	"write-combining",	/* 1 */
-	"?",			/* 2 */
-	"?",			/* 3 */
-	"write-through",	/* 4 */
-	"write-protect",	/* 5 */
-	"write-back",		/* 6 */
-};
-
-const char *mtrr_attrib_to_str(int x)
-{
-	return (x <= 6) ? mtrr_strings[x] : "?";
-}
-
-#ifdef CONFIG_PROC_FS
-
-static int
-mtrr_file_add(unsigned long base, unsigned long size,
-	      unsigned int type, char increment, struct file *file, int page)
-{
-	int reg, max;
-	unsigned int *fcount = FILE_FCOUNT(file);
-
-	max = num_var_ranges;
-	if (fcount == NULL) {
-		fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
-		if (!fcount)
-			return -ENOMEM;
-		FILE_FCOUNT(file) = fcount;
-	}
-	if (!page) {
-		if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
-			return -EINVAL;
-		base >>= PAGE_SHIFT;
-		size >>= PAGE_SHIFT;
-	}
-	reg = mtrr_add_page(base, size, type, 1);
-	if (reg >= 0)
-		++fcount[reg];
-	return reg;
-}
-
-static int
-mtrr_file_del(unsigned long base, unsigned long size,
-	      struct file *file, int page)
-{
-	int reg;
-	unsigned int *fcount = FILE_FCOUNT(file);
-
-	if (!page) {
-		if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
-			return -EINVAL;
-		base >>= PAGE_SHIFT;
-		size >>= PAGE_SHIFT;
-	}
-	reg = mtrr_del_page(-1, base, size);
-	if (reg < 0)
-		return reg;
-	if (fcount == NULL)
-		return reg;
-	if (fcount[reg] < 1)
-		return -EINVAL;
-	--fcount[reg];
-	return reg;
-}
-
-/* RED-PEN: seq_file can seek now. this is ignored. */
-static ssize_t
-mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
-/*  Format of control line:
-    "base=%Lx size=%Lx type=%s"     OR:
-    "disable=%d"
-*/
-{
-	int i, err;
-	unsigned long reg;
-	unsigned long long base, size;
-	char *ptr;
-	char line[LINE_SIZE];
-	size_t linelen;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (!len)
-		return -EINVAL;
-	memset(line, 0, LINE_SIZE);
-	if (len > LINE_SIZE)
-		len = LINE_SIZE;
-	if (copy_from_user(line, buf, len - 1))
-		return -EFAULT;
-	linelen = strlen(line);
-	ptr = line + linelen - 1;
-	if (linelen && *ptr == '\n')
-		*ptr = '\0';
-	if (!strncmp(line, "disable=", 8)) {
-		reg = simple_strtoul(line + 8, &ptr, 0);
-		err = mtrr_del_page(reg, 0, 0);
-		if (err < 0)
-			return err;
-		return len;
-	}
-	if (strncmp(line, "base=", 5))
-		return -EINVAL;
-	base = simple_strtoull(line + 5, &ptr, 0);
-	for (; isspace(*ptr); ++ptr) ;
-	if (strncmp(ptr, "size=", 5))
-		return -EINVAL;
-	size = simple_strtoull(ptr + 5, &ptr, 0);
-	if ((base & 0xfff) || (size & 0xfff))
-		return -EINVAL;
-	for (; isspace(*ptr); ++ptr) ;
-	if (strncmp(ptr, "type=", 5))
-		return -EINVAL;
-	ptr += 5;
-	for (; isspace(*ptr); ++ptr) ;
-	for (i = 0; i < MTRR_NUM_TYPES; ++i) {
-		if (strcmp(ptr, mtrr_strings[i]))
-			continue;
-		base >>= PAGE_SHIFT;
-		size >>= PAGE_SHIFT;
-		err =
-		    mtrr_add_page((unsigned long) base, (unsigned long) size, i,
-				  1);
-		if (err < 0)
-			return err;
-		return len;
-	}
-	return -EINVAL;
-}
-
-static long
-mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
-{
-	int err = 0;
-	mtrr_type type;
-	unsigned long size;
-	struct mtrr_sentry sentry;
-	struct mtrr_gentry gentry;
-	void __user *arg = (void __user *) __arg;
-
-	switch (cmd) {
-	case MTRRIOC_ADD_ENTRY:
-	case MTRRIOC_SET_ENTRY:
-	case MTRRIOC_DEL_ENTRY:
-	case MTRRIOC_KILL_ENTRY:
-	case MTRRIOC_ADD_PAGE_ENTRY:
-	case MTRRIOC_SET_PAGE_ENTRY:
-	case MTRRIOC_DEL_PAGE_ENTRY:
-	case MTRRIOC_KILL_PAGE_ENTRY:
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
-		break;
-	case MTRRIOC_GET_ENTRY:
-	case MTRRIOC_GET_PAGE_ENTRY:
-		if (copy_from_user(&gentry, arg, sizeof gentry))
-			return -EFAULT;
-		break;
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_ADD_ENTRY:
-	case MTRRIOC32_SET_ENTRY:
-	case MTRRIOC32_DEL_ENTRY:
-	case MTRRIOC32_KILL_ENTRY:
-	case MTRRIOC32_ADD_PAGE_ENTRY:
-	case MTRRIOC32_SET_PAGE_ENTRY:
-	case MTRRIOC32_DEL_PAGE_ENTRY:
-	case MTRRIOC32_KILL_PAGE_ENTRY: {
-		struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
-		err = get_user(sentry.base, &s32->base);
-		err |= get_user(sentry.size, &s32->size);
-		err |= get_user(sentry.type, &s32->type);
-		if (err)
-			return err;
-		break;
-	}
-	case MTRRIOC32_GET_ENTRY:
-	case MTRRIOC32_GET_PAGE_ENTRY: {
-		struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
-		err = get_user(gentry.regnum, &g32->regnum);
-		err |= get_user(gentry.base, &g32->base);
-		err |= get_user(gentry.size, &g32->size);
-		err |= get_user(gentry.type, &g32->type);
-		if (err)
-			return err;
-		break;
-	}
-#endif
-	}
-
-	switch (cmd) {
-	default:
-		return -ENOTTY;
-	case MTRRIOC_ADD_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_ADD_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err =
-		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
-				  file, 0);
-		break;
-	case MTRRIOC_SET_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_SET_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
-		break;
-	case MTRRIOC_DEL_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_DEL_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err = mtrr_file_del(sentry.base, sentry.size, file, 0);
-		break;
-	case MTRRIOC_KILL_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_KILL_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err = mtrr_del(-1, sentry.base, sentry.size);
-		break;
-	case MTRRIOC_GET_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_GET_ENTRY:
-#endif
-		if (gentry.regnum >= num_var_ranges)
-			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
-
-		/* Hide entries that go above 4GB */
-		if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
-		    || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
-			gentry.base = gentry.size = gentry.type = 0;
-		else {
-			gentry.base <<= PAGE_SHIFT;
-			gentry.size = size << PAGE_SHIFT;
-			gentry.type = type;
-		}
-
-		break;
-	case MTRRIOC_ADD_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_ADD_PAGE_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err =
-		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
-				  file, 1);
-		break;
-	case MTRRIOC_SET_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_SET_PAGE_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
-		break;
-	case MTRRIOC_DEL_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_DEL_PAGE_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err = mtrr_file_del(sentry.base, sentry.size, file, 1);
-		break;
-	case MTRRIOC_KILL_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_KILL_PAGE_ENTRY:
-#endif
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		err = mtrr_del_page(-1, sentry.base, sentry.size);
-		break;
-	case MTRRIOC_GET_PAGE_ENTRY:
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_GET_PAGE_ENTRY:
-#endif
-		if (gentry.regnum >= num_var_ranges)
-			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
-		/* Hide entries that would overflow */
-		if (size != (__typeof__(gentry.size))size)
-			gentry.base = gentry.size = gentry.type = 0;
-		else {
-			gentry.size = size;
-			gentry.type = type;
-		}
-		break;
-	}
-
-	if (err)
-		return err;
-
-	switch(cmd) {
-	case MTRRIOC_GET_ENTRY:
-	case MTRRIOC_GET_PAGE_ENTRY:
-		if (copy_to_user(arg, &gentry, sizeof gentry))
-			err = -EFAULT;
-		break;
-#ifdef CONFIG_COMPAT
-	case MTRRIOC32_GET_ENTRY:
-	case MTRRIOC32_GET_PAGE_ENTRY: {
-		struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
-		err = put_user(gentry.base, &g32->base);
-		err |= put_user(gentry.size, &g32->size);
-		err |= put_user(gentry.regnum, &g32->regnum);
-		err |= put_user(gentry.type, &g32->type);
-		break;
-	}
-#endif
-	}
-	return err;
-}
-
-static int
-mtrr_close(struct inode *ino, struct file *file)
-{
-	int i, max;
-	unsigned int *fcount = FILE_FCOUNT(file);
-
-	if (fcount != NULL) {
-		max = num_var_ranges;
-		for (i = 0; i < max; ++i) {
-			while (fcount[i] > 0) {
-				mtrr_del(i, 0, 0);
-				--fcount[i];
-			}
-		}
-		kfree(fcount);
-		FILE_FCOUNT(file) = NULL;
-	}
-	return single_release(ino, file);
-}
-
-static int mtrr_seq_show(struct seq_file *seq, void *offset);
-
-static int mtrr_open(struct inode *inode, struct file *file)
-{
-	if (!mtrr_if)
-		return -EIO;
-	if (!mtrr_if->get)
-		return -ENXIO;
-	return single_open(file, mtrr_seq_show, NULL);
-}
-
-static const struct file_operations mtrr_fops = {
-	.owner		 = THIS_MODULE,
-	.open		 = mtrr_open,
-	.read		 = seq_read,
-	.llseek		 = seq_lseek,
-	.write		 = mtrr_write,
-	.unlocked_ioctl	 = mtrr_ioctl,
-	.compat_ioctl	 = mtrr_ioctl,
-	.release	 = mtrr_close,
-};
-
-
-static struct proc_dir_entry *proc_root_mtrr;
-
-
-static int mtrr_seq_show(struct seq_file *seq, void *offset)
-{
-	char factor;
-	int i, max, len;
-	mtrr_type type;
-	unsigned long base, size;
-
-	len = 0;
-	max = num_var_ranges;
-	for (i = 0; i < max; i++) {
-		mtrr_if->get(i, &base, &size, &type);
-		if (size == 0)
-			usage_table[i] = 0;
-		else {
-			if (size < (0x100000 >> PAGE_SHIFT)) {
-				/* less than 1MB */
-				factor = 'K';
-				size <<= PAGE_SHIFT - 10;
-			} else {
-				factor = 'M';
-				size >>= 20 - PAGE_SHIFT;
-			}
-			/* RED-PEN: base can be > 32bit */
-			len += seq_printf(seq,
-				   "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
-			     i, base, base >> (20 - PAGE_SHIFT), size, factor,
-			     mtrr_attrib_to_str(type), usage_table[i]);
-		}
-	}
-	return 0;
-}
-
-static int __init mtrr_if_init(void)
-{
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
-	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
-	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
-	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
-		return -ENODEV;
-
-	proc_root_mtrr =
-	    create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root);
-	if (proc_root_mtrr) {
-		proc_root_mtrr->owner = THIS_MODULE;
-		proc_root_mtrr->proc_fops = &mtrr_fops;
-	}
-	return 0;
-}
-
-arch_initcall(mtrr_if_init);
-#endif /* CONFIG_PROC_FS */
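
The control-line format parsed by the deleted mtrr_write() is usable from
userspace through /proc/mtrr; a small example (the base and size values are
made up, and CAP_SYS_ADMIN is required):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/mtrr", "w");

        if (!f) {
            perror("/proc/mtrr");
            return 1;
        }
        /* Request a 4MB write-combining region; writing "disable=N"
         * would delete register N instead. */
        fprintf(f, "base=0xf8000000 size=0x400000 type=write-combining\n");
        return fclose(f) ? 1 : 0;
    }
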
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
deleted file mode 100644
index c48b6fea5ab4..000000000000
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ /dev/null
@@ -1,768 +0,0 @@
-/*  Generic MTRR (Memory Type Range Register) driver.
-
-    Copyright (C) 1997-2000  Richard Gooch
-    Copyright (c) 2002	     Patrick Mochel
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
-    The postal address is:
-      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
-
-    Source: "Pentium Pro Family Developer's Manual, Volume 3:
-    Operating System Writer's Guide" (Intel document number 242692),
-    section 11.11.7
-
-    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
-    on 6-7 March 2002.
-    Source: Intel Architecture Software Developers Manual, Volume 3:
-    System Programming Guide; Section 9.11. (1997 edition - PPro).
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-
-#include <asm/mtrr.h>
-
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include "mtrr.h"
-
-u32 num_var_ranges = 0;
-
-unsigned int *usage_table;
-static DEFINE_MUTEX(mtrr_mutex);
-
-u64 size_or_mask, size_and_mask;
-
-static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
-
-struct mtrr_ops * mtrr_if = NULL;
-
-static void set_mtrr(unsigned int reg, unsigned long base,
-		     unsigned long size, mtrr_type type);
-
-#ifndef CONFIG_X86_64
-extern int arr3_protected;
-#else
-#define arr3_protected 0
-#endif
-
-void set_mtrr_ops(struct mtrr_ops * ops)
-{
-	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
-		mtrr_ops[ops->vendor] = ops;
-}
-
-/*  Returns non-zero if we have the write-combining memory type  */
-static int have_wrcomb(void)
-{
-	struct pci_dev *dev;
-	u8 rev;
-
-	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
-		/* ServerWorks LE chipsets < rev 6 have problems with write-combining
-		   Don't allow it and leave room for other chipsets to be tagged */
-		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
-		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
-			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
-			if (rev <= 5) {
-				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
-				pci_dev_put(dev);
-				return 0;
-			}
-		}
-		/* Intel 450NX errata # 23. Non ascending cacheline evictions to
-		   write combining memory may result in data corruption */
94 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
95 dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
96 printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
97 pci_dev_put(dev);
98 return 0;
99 }
100 pci_dev_put(dev);
101 }
102 return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
103}
104
105/* This function returns the number of variable MTRRs */
106static void __init set_num_var_ranges(void)
107{
108 unsigned long config = 0, dummy;
109
110 if (use_intel()) {
111 rdmsr(MTRRcap_MSR, config, dummy);
112 } else if (is_cpu(AMD))
113 config = 2;
114 else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
115 config = 8;
116 num_var_ranges = config & 0xff;
117}
118
119static void __init init_table(void)
120{
121 int i, max;
122
123 max = num_var_ranges;
124 if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
125 == NULL) {
126 printk(KERN_ERR "mtrr: could not allocate\n");
127 return;
128 }
129 for (i = 0; i < max; i++)
130 usage_table[i] = 1;
131}
132
133struct set_mtrr_data {
134 atomic_t count;
135 atomic_t gate;
136 unsigned long smp_base;
137 unsigned long smp_size;
138 unsigned int smp_reg;
139 mtrr_type smp_type;
140};
141
142#ifdef CONFIG_SMP
143
144static void ipi_handler(void *info)
145/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
146 [RETURNS] Nothing.
147*/
148{
149 struct set_mtrr_data *data = info;
150 unsigned long flags;
151
152 local_irq_save(flags);
153
154 atomic_dec(&data->count);
155 while(!atomic_read(&data->gate))
156 cpu_relax();
157
158 /* The master has cleared me to execute */
159 if (data->smp_reg != ~0U)
160 mtrr_if->set(data->smp_reg, data->smp_base,
161 data->smp_size, data->smp_type);
162 else
163 mtrr_if->set_all();
164
165 atomic_dec(&data->count);
166 while(atomic_read(&data->gate))
167 cpu_relax();
168
169 atomic_dec(&data->count);
170 local_irq_restore(flags);
171}
172
173#endif
174
175static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
176 return type1 == MTRR_TYPE_UNCACHABLE ||
177 type2 == MTRR_TYPE_UNCACHABLE ||
178 (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
179 (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
180}
181
182/**
183 * set_mtrr - update mtrrs on all processors
184 * @reg: mtrr in question
185 * @base: mtrr base
186 * @size: mtrr size
187 * @type: mtrr type
188 *
189 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
190 *
191 * 1. Send IPI to do the following:
192 * 2. Disable Interrupts
193 * 3. Wait for all procs to do so
194 * 4. Enter no-fill cache mode
195 * 5. Flush caches
196 * 6. Clear PGE bit
197 * 7. Flush all TLBs
198 * 8. Disable all range registers
199 * 9. Update the MTRRs
200 * 10. Enable all range registers
201 * 11. Flush all TLBs and caches again
202 * 12. Enter normal cache mode and reenable caching
203 * 13. Set PGE
204 * 14. Wait for buddies to catch up
205 * 15. Enable interrupts.
206 *
207 * What does that mean for us? Well, first we set data.count to the number
208 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
209 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
210 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
211 * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
212 * differently, so we call mtrr_if->set() callback and let them take care of it.
213 * When they're done, they again decrement data->count and wait for data.gate to
214 * be reset.
215 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
216 * Everyone then enables interrupts and we all continue on.
217 *
218 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
219 * becomes nops.
220 */
221static void set_mtrr(unsigned int reg, unsigned long base,
222 unsigned long size, mtrr_type type)
223{
224 struct set_mtrr_data data;
225 unsigned long flags;
226
227 data.smp_reg = reg;
228 data.smp_base = base;
229 data.smp_size = size;
230 data.smp_type = type;
231 atomic_set(&data.count, num_booting_cpus() - 1);
232 /* make sure data.count is visible before unleashing other CPUs */
233 smp_wmb();
234 atomic_set(&data.gate,0);
235
236 /* Start the ball rolling on other CPUs */
237 if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
238 panic("mtrr: timed out waiting for other CPUs\n");
239
240 local_irq_save(flags);
241
242 while(atomic_read(&data.count))
243 cpu_relax();
244
245 /* ok, reset count and toggle gate */
246 atomic_set(&data.count, num_booting_cpus() - 1);
247 smp_wmb();
248 atomic_set(&data.gate,1);
249
250 /* do our MTRR business */
251
252 /* HACK!
253 * We use this same function to initialize the mtrrs on boot.
254 * The state of the boot cpu's mtrrs has been saved, and we want
255 * to replicate it across all the APs.
256 * If we're doing that @reg is set to something special...
257 */
258 if (reg != ~0U)
259 mtrr_if->set(reg, base, size, type);
260
261 /* wait for the others */
262 while (atomic_read(&data.count))
263 cpu_relax();
264
265 atomic_set(&data.count, num_booting_cpus() - 1);
266 smp_wmb();
267 atomic_set(&data.gate, 0);
268
269 /*
270 * Wait here for everyone to have seen the gate change
271 * So we're the last ones to touch 'data'
272 */
273 while (atomic_read(&data.count))
274 cpu_relax();
275
276 local_irq_restore(flags);
277}
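To see the three-phase count/gate handshake in isolation, here is a minimal sketch of the same rendezvous written with plain C11 atomics; the rendezvous struct and the master_side()/slave_side() names are illustrative stand-ins for set_mtrr_data, set_mtrr() and ipi_handler(), and IPI delivery, interrupt masking and cpu_relax() are elided.

#include <stdatomic.h>

struct rendezvous {
	atomic_int count;	/* CPUs yet to finish the current phase */
	atomic_int gate;	/* toggled by the master to open each phase */
};

/* What each "other" CPU runs, mirroring ipi_handler() above. */
static void slave_side(struct rendezvous *r)
{
	atomic_fetch_sub(&r->count, 1);		/* phase 1: arrived, irqs off */
	while (!atomic_load(&r->gate))
		;				/* wait for the master's go */

	/* ... update this CPU's MTRRs here ... */

	atomic_fetch_sub(&r->count, 1);		/* phase 2: update done */
	while (atomic_load(&r->gate))
		;				/* wait for the gate to drop */

	atomic_fetch_sub(&r->count, 1);		/* phase 3: safe to leave */
}

/* What the updating CPU runs, mirroring set_mtrr() above.  C11 seq_cst
 * atomics give the ordering the kernel code gets from smp_wmb(). */
static void master_side(struct rendezvous *r, int nr_others)
{
	atomic_store(&r->count, nr_others);
	atomic_store(&r->gate, 0);
	/* ... send IPIs so every other CPU enters slave_side() ... */

	while (atomic_load(&r->count))
		;				/* everyone arrived, irqs off */
	atomic_store(&r->count, nr_others);
	atomic_store(&r->gate, 1);		/* open phase 2 */

	/* ... update our own MTRRs here ... */

	while (atomic_load(&r->count))
		;				/* everyone updated */
	atomic_store(&r->count, nr_others);
	atomic_store(&r->gate, 0);		/* open phase 3 */

	while (atomic_load(&r->count))
		;				/* we are the last to touch *r */
}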
278
279/**
280 * mtrr_add_page - Add a memory type region
281 * @base: Physical base address of region in pages (in units of 4 kB!)
282 * @size: Physical size of region in pages (4 kB)
283 * @type: Type of MTRR desired
284 * @increment: If this is true do usage counting on the region
285 *
286 * Memory type region registers control the caching on newer Intel and
287 * non-Intel processors. This function allows drivers to request that an
288 * MTRR be added. The details and hardware specifics of each processor's
289 * implementation are hidden from the caller, but nevertheless the
290 * caller should expect to need to provide a power of two size on an
291 * equivalent power of two boundary.
292 *
293 * If the region cannot be added, either because all regions are in use
294 * or because the CPU cannot support it, a negative value is returned. On success
295 * the register number for this entry is returned, but should be treated
296 * as a cookie only.
297 *
298 * On a multiprocessor machine the changes are made to all processors.
299 * This is required on x86 by the Intel processors.
300 *
301 * The available types are
302 *
303 * %MTRR_TYPE_UNCACHABLE - No caching
304 *
305 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
306 *
307 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
308 *
309 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
310 *
311 * BUGS: Needs a quiet flag for the cases where drivers do not mind
312 * failures and do not wish system log messages to be sent.
313 */
314
315int mtrr_add_page(unsigned long base, unsigned long size,
316 unsigned int type, char increment)
317{
318 int i, replace, error;
319 mtrr_type ltype;
320 unsigned long lbase, lsize;
321
322 if (!mtrr_if)
323 return -ENXIO;
324
325 if ((error = mtrr_if->validate_add_page(base, size, type)))
326 return error;
327
328 if (type >= MTRR_NUM_TYPES) {
329 printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
330 return -EINVAL;
331 }
332
333 /* If the type is WC, check that this processor supports it */
334 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
335 printk(KERN_WARNING
336 "mtrr: your processor doesn't support write-combining\n");
337 return -ENOSYS;
338 }
339
340 if (!size) {
341 printk(KERN_WARNING "mtrr: zero sized request\n");
342 return -EINVAL;
343 }
344
345 if (base & size_or_mask || size & size_or_mask) {
346 printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
347 return -EINVAL;
348 }
349
350 error = -EINVAL;
351 replace = -1;
352
353 /* No CPU hotplug when we change MTRR entries */
354 lock_cpu_hotplug();
355 /* Search for existing MTRR */
356 mutex_lock(&mtrr_mutex);
357 for (i = 0; i < num_var_ranges; ++i) {
358 mtrr_if->get(i, &lbase, &lsize, &ltype);
359 if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
360 continue;
361 /* At this point we know there is some kind of overlap/enclosure */
362 if (base < lbase || base + size - 1 > lbase + lsize - 1) {
363 if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
364 /* New region encloses an existing region */
365 if (type == ltype) {
366 replace = replace == -1 ? i : -2;
367 continue;
368 }
369 else if (types_compatible(type, ltype))
370 continue;
371 }
372 printk(KERN_WARNING
373 "mtrr: 0x%lx000,0x%lx000 overlaps existing"
374 " 0x%lx000,0x%lx000\n", base, size, lbase,
375 lsize);
376 goto out;
377 }
378 /* New region is enclosed by an existing region */
379 if (ltype != type) {
380 if (types_compatible(type, ltype))
381 continue;
382 printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
383 base, size, mtrr_attrib_to_str(ltype),
384 mtrr_attrib_to_str(type));
385 goto out;
386 }
387 if (increment)
388 ++usage_table[i];
389 error = i;
390 goto out;
391 }
392 /* Search for an empty MTRR */
393 i = mtrr_if->get_free_region(base, size, replace);
394 if (i >= 0) {
395 set_mtrr(i, base, size, type);
396 if (likely(replace < 0))
397 usage_table[i] = 1;
398 else {
399 usage_table[i] = usage_table[replace] + !!increment;
400 if (unlikely(replace != i)) {
401 set_mtrr(replace, 0, 0, 0);
402 usage_table[replace] = 0;
403 }
404 }
405 } else
406 printk(KERN_INFO "mtrr: no more MTRRs available\n");
407 error = i;
408 out:
409 mutex_unlock(&mtrr_mutex);
410 unlock_cpu_hotplug();
411 return error;
412}
413
414static int mtrr_check(unsigned long base, unsigned long size)
415{
416 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
417 printk(KERN_WARNING
418 "mtrr: size and base must be multiples of 4 kiB\n");
419 printk(KERN_DEBUG
420 "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
421 dump_stack();
422 return -1;
423 }
424 return 0;
425}
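/* Illustrative values: mtrr_add(0xf8000123, 0x400000, MTRR_TYPE_WRCOMB, 1)
 * trips this check because the base is not a multiple of 4 kB. */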
426
427/**
428 * mtrr_add - Add a memory type region
429 * @base: Physical base address of region
430 * @size: Physical size of region
431 * @type: Type of MTRR desired
432 * @increment: If this is true do usage counting on the region
433 *
434 * Memory type region registers control the caching on newer Intel and
435 * non-Intel processors. This function allows drivers to request that an
436 * MTRR be added. The details and hardware specifics of each processor's
437 * implementation are hidden from the caller, but nevertheless the
438 * caller should expect to need to provide a power of two size on an
439 * equivalent power of two boundary.
440 *
441 * If the region cannot be added, either because all regions are in use
442 * or because the CPU cannot support it, a negative value is returned. On success
443 * the register number for this entry is returned, but should be treated
444 * as a cookie only.
445 *
446 * On a multiprocessor machine the changes are made to all processors.
447 * This is required on x86 by the Intel processors.
448 *
449 * The available types are
450 *
451 * %MTRR_TYPE_UNCACHABLE - No caching
452 *
453 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
454 *
455 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
456 *
457 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
458 *
459 * BUGS: Needs a quiet flag for the cases where drivers do not mind
460 * failures and do not wish system log messages to be sent.
461 */
462
463int
464mtrr_add(unsigned long base, unsigned long size, unsigned int type,
465 char increment)
466{
467 if (mtrr_check(base, size))
468 return -EINVAL;
469 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
470 increment);
471}
472
473/**
474 * mtrr_del_page - delete a memory type region
475 * @reg: Register returned by mtrr_add
476 * @base: Physical base address
477 * @size: Size of region
478 *
479 * If register is supplied then base and size are ignored. This is
480 * how drivers should call it.
481 *
482 * Releases an MTRR region. If the usage count drops to zero the
483 * register is freed and the region returns to default state.
484 * On success the register is returned, on failure a negative error
485 * code.
486 */
487
488int mtrr_del_page(int reg, unsigned long base, unsigned long size)
489{
490 int i, max;
491 mtrr_type ltype;
492 unsigned long lbase, lsize;
493 int error = -EINVAL;
494
495 if (!mtrr_if)
496 return -ENXIO;
497
498 max = num_var_ranges;
499 /* No CPU hotplug when we change MTRR entries */
500 lock_cpu_hotplug();
501 mutex_lock(&mtrr_mutex);
502 if (reg < 0) {
503 /* Search for existing MTRR */
504 for (i = 0; i < max; ++i) {
505 mtrr_if->get(i, &lbase, &lsize, &ltype);
506 if (lbase == base && lsize == size) {
507 reg = i;
508 break;
509 }
510 }
511 if (reg < 0) {
512 printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
513 size);
514 goto out;
515 }
516 }
517 if (reg >= max) {
518 printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
519 goto out;
520 }
521 if (is_cpu(CYRIX) && !use_intel()) {
522 if ((reg == 3) && arr3_protected) {
523 printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
524 goto out;
525 }
526 }
527 mtrr_if->get(reg, &lbase, &lsize, &ltype);
528 if (lsize < 1) {
529 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
530 goto out;
531 }
532 if (usage_table[reg] < 1) {
533 printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
534 goto out;
535 }
536 if (--usage_table[reg] < 1)
537 set_mtrr(reg, 0, 0, 0);
538 error = reg;
539 out:
540 mutex_unlock(&mtrr_mutex);
541 unlock_cpu_hotplug();
542 return error;
543}
544/**
545 * mtrr_del - delete a memory type region
546 * @reg: Register returned by mtrr_add
547 * @base: Physical base address
548 * @size: Size of region
549 *
550 * If register is supplied then base and size are ignored. This is
551 * how drivers should call it.
552 *
553 * Releases an MTRR region. If the usage count drops to zero the
554 * register is freed and the region returns to default state.
555 * On success the register is returned, on failure a negative error
556 * code.
557 */
558
559int
560mtrr_del(int reg, unsigned long base, unsigned long size)
561{
562 if (mtrr_check(base, size))
563 return -EINVAL;
564 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
565}
566
567EXPORT_SYMBOL(mtrr_add);
568EXPORT_SYMBOL(mtrr_del);
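As a usage illustration for the pair just exported: a framebuffer-style driver of this era would request write-combining over its aperture once at init and drop it at teardown, treating the returned register number purely as a cookie. A hedged sketch only; the example_* functions and the decision to treat failure as non-fatal are illustrative, not taken from any specific driver.

static int fb_mtrr = -1;	/* cookie from mtrr_add(), or -1 */

static void example_enable_wc(unsigned long fb_base, unsigned long fb_size)
{
	/* base and size are in bytes here; both must be page aligned and
	   the size a power of two on a matching boundary. */
	fb_mtrr = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
	if (fb_mtrr < 0)
		printk(KERN_INFO "example: write-combining unavailable\n");
}

static void example_disable_wc(unsigned long fb_base, unsigned long fb_size)
{
	if (fb_mtrr >= 0)
		mtrr_del(fb_mtrr, fb_base, fb_size);
	fb_mtrr = -1;
}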
569
570/* HACK ALERT!
571 * These should be called implicitly, but we can't yet until all the initcall
572 * stuff is done...
573 */
574extern void amd_init_mtrr(void);
575extern void cyrix_init_mtrr(void);
576extern void centaur_init_mtrr(void);
577
578static void __init init_ifs(void)
579{
580#ifndef CONFIG_X86_64
581 amd_init_mtrr();
582 cyrix_init_mtrr();
583 centaur_init_mtrr();
584#endif
585}
586
587/* The suspend/resume methods are only for CPUs without the generic MTRR
588 * interface. CPUs using the generic MTRR driver don't require this.
589 */
590struct mtrr_value {
591 mtrr_type ltype;
592 unsigned long lbase;
593 unsigned long lsize;
594};
595
596static struct mtrr_value * mtrr_state;
597
598static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
599{
600 int i;
601 int size = num_var_ranges * sizeof(struct mtrr_value);
602
603 mtrr_state = kzalloc(size, GFP_ATOMIC);
604 if (!mtrr_state)
605 return -ENOMEM;
606
607 for (i = 0; i < num_var_ranges; i++) {
608 mtrr_if->get(i,
609 &mtrr_state[i].lbase,
610 &mtrr_state[i].lsize,
611 &mtrr_state[i].ltype);
612 }
613 return 0;
614}
615
616static int mtrr_restore(struct sys_device * sysdev)
617{
618 int i;
619
620 for (i = 0; i < num_var_ranges; i++) {
621 if (mtrr_state[i].lsize)
622 set_mtrr(i,
623 mtrr_state[i].lbase,
624 mtrr_state[i].lsize,
625 mtrr_state[i].ltype);
626 }
627 kfree(mtrr_state);
628 return 0;
629}
630
631
632
633static struct sysdev_driver mtrr_sysdev_driver = {
634 .suspend = mtrr_save,
635 .resume = mtrr_restore,
636};
637
638
639/**
640 * mtrr_bp_init - initialize mtrrs on the boot CPU
641 *
642 * This needs to be called early; before any of the other CPUs are
643 * initialized (i.e. before smp_init()).
644 *
645 */
646void __init mtrr_bp_init(void)
647{
648 init_ifs();
649
650 if (cpu_has_mtrr) {
651 mtrr_if = &generic_mtrr_ops;
652 size_or_mask = 0xff000000; /* 36 bits */
653 size_and_mask = 0x00f00000;
654
655 /* This is an AMD-specific CPUID extension, but we assume (hope?)
656 that Intel will implement it too when they extend the address
657 bus of the Xeon. */
658 if (cpuid_eax(0x80000000) >= 0x80000008) {
659 u32 phys_addr;
660 phys_addr = cpuid_eax(0x80000008) & 0xff;
661 /* CPUID workaround for Intel 0F33/0F34 CPU */
662 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
663 boot_cpu_data.x86 == 0xF &&
664 boot_cpu_data.x86_model == 0x3 &&
665 (boot_cpu_data.x86_mask == 0x3 ||
666 boot_cpu_data.x86_mask == 0x4))
667 phys_addr = 36;
668
669 size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
670 size_and_mask = ~size_or_mask & 0xfffff00000ULL;
671 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
672 boot_cpu_data.x86 == 6) {
673 /* The VIA C* family has Intel-style MTRRs, but
674 doesn't support PAE */
675 size_or_mask = 0xfff00000; /* 32 bits */
676 size_and_mask = 0;
677 }
678 } else {
679 switch (boot_cpu_data.x86_vendor) {
680 case X86_VENDOR_AMD:
681 if (cpu_has_k6_mtrr) {
682 /* Pre-Athlon (K6) AMD CPU MTRRs */
683 mtrr_if = mtrr_ops[X86_VENDOR_AMD];
684 size_or_mask = 0xfff00000; /* 32 bits */
685 size_and_mask = 0;
686 }
687 break;
688 case X86_VENDOR_CENTAUR:
689 if (cpu_has_centaur_mcr) {
690 mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
691 size_or_mask = 0xfff00000; /* 32 bits */
692 size_and_mask = 0;
693 }
694 break;
695 case X86_VENDOR_CYRIX:
696 if (cpu_has_cyrix_arr) {
697 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
698 size_or_mask = 0xfff00000; /* 32 bits */
699 size_and_mask = 0;
700 }
701 break;
702 default:
703 break;
704 }
705 }
706
707 if (mtrr_if) {
708 set_num_var_ranges();
709 init_table();
710 if (use_intel())
711 get_mtrr_state();
712 }
713}
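A worked instance of the width computation above, assuming PAGE_SHIFT == 12 and a CPU that reports phys_addr == 36 through CPUID function 0x80000008:

/*
 *   1ULL << (36 - 12)                 == 1 << 24  (2^24 page frames)
 *   size_or_mask  = ~((1 << 24) - 1)  == 0xffffffffff000000
 *   size_and_mask = ~size_or_mask & 0xfffff00000ULL
 *                                     == 0x0000000000f00000
 *
 * 2^24 pages of 4 kB span 2^36 bytes = 64 GB, and the low words of the
 * two masks match the 36-bit defaults (0xff000000 and 0x00f00000)
 * hardwired at the top of the function.
 */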
714
715void mtrr_ap_init(void)
716{
717 unsigned long flags;
718
719 if (!mtrr_if || !use_intel())
720 return;
721 /*
722 * Ideally we would hold mtrr_mutex here to prevent the MTRR entries
723 * from changing, but this routine is called at CPU boot time, and
724 * holding the lock there breaks things. It is called in two cases:
725 * 1. very early during software resume, when there absolutely are no
726 * MTRR entry changes; 2. at CPU hot-add time. We let mtrr_add/del_page
727 * hold the CPU hotplug lock to prevent MTRR entry changes.
728 */
729 local_irq_save(flags);
730
731 mtrr_if->set_all();
732
733 local_irq_restore(flags);
734}
735
736/**
737 * Save current fixed-range MTRR state of the BSP
738 */
739void mtrr_save_state(void)
740{
741 int cpu = get_cpu();
742
743 if (cpu == 0)
744 mtrr_save_fixed_ranges(NULL);
745 else
746 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
747 put_cpu();
748}
749
750static int __init mtrr_init_finialize(void)
751{
752 if (!mtrr_if)
753 return 0;
754 if (use_intel())
755 mtrr_state_warn();
756 else {
757 /* These CPUs have no MTRRs and seem not to support SMP. They have
758 * vendor-specific drivers, so we use a tricky method to support
759 * suspend/resume for them.
760 * TBD: is there any system with such a CPU that supports
761 * suspend/resume? If not, we should remove this code.
762 */
763 sysdev_driver_register(&cpu_sysdev_class,
764 &mtrr_sysdev_driver);
765 }
766 return 0;
767}
768subsys_initcall(mtrr_init_finialize);
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
deleted file mode 100644
index 289dfe6030e3..000000000000
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * local mtrr defines.
3 */
4
5#ifndef TRUE
6#define TRUE 1
7#define FALSE 0
8#endif
9
10#define MTRRcap_MSR 0x0fe
11#define MTRRdefType_MSR 0x2ff
12
13#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
14#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
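/* The variable-range MSRs come in base/mask pairs; for example,
 * register 3 uses MTRRphysBase_MSR(3) == 0x206 and
 * MTRRphysMask_MSR(3) == 0x207. */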
15
16#define NUM_FIXED_RANGES 88
17#define MTRRfix64K_00000_MSR 0x250
18#define MTRRfix16K_80000_MSR 0x258
19#define MTRRfix16K_A0000_MSR 0x259
20#define MTRRfix4K_C0000_MSR 0x268
21#define MTRRfix4K_C8000_MSR 0x269
22#define MTRRfix4K_D0000_MSR 0x26a
23#define MTRRfix4K_D8000_MSR 0x26b
24#define MTRRfix4K_E0000_MSR 0x26c
25#define MTRRfix4K_E8000_MSR 0x26d
26#define MTRRfix4K_F0000_MSR 0x26e
27#define MTRRfix4K_F8000_MSR 0x26f
28
29#define MTRR_CHANGE_MASK_FIXED 0x01
30#define MTRR_CHANGE_MASK_VARIABLE 0x02
31#define MTRR_CHANGE_MASK_DEFTYPE 0x04
32
33/* In the Intel processor's MTRR interface, the MTRR type is always held in
34 an 8-bit field: */
35typedef u8 mtrr_type;
36
37struct mtrr_ops {
38 u32 vendor;
39 u32 use_intel_if;
40// void (*init)(void);
41 void (*set)(unsigned int reg, unsigned long base,
42 unsigned long size, mtrr_type type);
43 void (*set_all)(void);
44
45 void (*get)(unsigned int reg, unsigned long *base,
46 unsigned long *size, mtrr_type * type);
47 int (*get_free_region)(unsigned long base, unsigned long size,
48 int replace_reg);
49 int (*validate_add_page)(unsigned long base, unsigned long size,
50 unsigned int type);
51 int (*have_wrcomb)(void);
52};
53
54extern int generic_get_free_region(unsigned long base, unsigned long size,
55 int replace_reg);
56extern int generic_validate_add_page(unsigned long base, unsigned long size,
57 unsigned int type);
58
59extern struct mtrr_ops generic_mtrr_ops;
60
61extern int positive_have_wrcomb(void);
62
63/* library functions for processor-specific routines */
64struct set_mtrr_context {
65 unsigned long flags;
66 unsigned long cr4val;
67 u32 deftype_lo;
68 u32 deftype_hi;
69 u32 ccr3;
70};
71
72struct mtrr_var_range {
73 u32 base_lo;
74 u32 base_hi;
75 u32 mask_lo;
76 u32 mask_hi;
77};
78
79void set_mtrr_done(struct set_mtrr_context *ctxt);
80void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
81void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
82
83void get_mtrr_state(void);
84
85extern void set_mtrr_ops(struct mtrr_ops * ops);
86
87extern u64 size_or_mask, size_and_mask;
88extern struct mtrr_ops * mtrr_if;
89
90#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
91#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
92
93extern unsigned int num_var_ranges;
94
95void mtrr_state_warn(void);
96const char *mtrr_attrib_to_str(int x);
97void mtrr_wrmsr(unsigned, unsigned, unsigned);
98
diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c
deleted file mode 100644
index c9014ca4a575..000000000000
--- a/arch/i386/kernel/cpu/mtrr/state.c
+++ /dev/null
@@ -1,79 +0,0 @@
1#include <linux/mm.h>
2#include <linux/init.h>
3#include <asm/io.h>
4#include <asm/mtrr.h>
5#include <asm/msr.h>
6#include <asm-i386/processor-cyrix.h>
7#include "mtrr.h"
8
9
10/* Put the processor into a state where MTRRs can be safely set */
11void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
12{
13 unsigned int cr0;
14
15 /* Disable interrupts locally */
16 local_irq_save(ctxt->flags);
17
18 if (use_intel() || is_cpu(CYRIX)) {
19
20 /* Save value of CR4 and clear Page Global Enable (bit 7) */
21 if ( cpu_has_pge ) {
22 ctxt->cr4val = read_cr4();
23 write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
24 }
25
26 /* Disable and flush caches. wbinvd itself flushes only the caches;
27 clearing CR4.PGE above is what flushes the TLBs */
28 cr0 = read_cr0() | 0x40000000;
29 wbinvd();
30 write_cr0(cr0);
31 wbinvd();
32
33 if (use_intel())
34 /* Save MTRR state */
35 rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
36 else
37 /* Cyrix ARRs - everything else was excluded at the top */
38 ctxt->ccr3 = getCx86(CX86_CCR3);
39 }
40}
41
42void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
43{
44 if (use_intel())
45 /* Disable MTRRs, and set the default type to uncached */
46 mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
47 ctxt->deftype_hi);
48 else if (is_cpu(CYRIX))
49 /* Cyrix ARRs - everything else was excluded at the top */
50 setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
51}
52
53/* Restore the processor after a set_mtrr_prepare */
54void set_mtrr_done(struct set_mtrr_context *ctxt)
55{
56 if (use_intel() || is_cpu(CYRIX)) {
57
58 /* Flush caches and TLBs */
59 wbinvd();
60
61 /* Restore MTRRdefType */
62 if (use_intel())
63 /* Intel (P6) standard MTRRs */
64 mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
65 else
66 /* Cyrix ARRs - everything else was excluded at the top */
67 setCx86(CX86_CCR3, ctxt->ccr3);
68
69 /* Enable caches */
70 write_cr0(read_cr0() & 0xbfffffff);
71
72 /* Restore value of CR4 */
73 if ( cpu_has_pge )
74 write_cr4(ctxt->cr4val);
75 }
76 /* Re-enable interrupts locally (if enabled previously) */
77 local_irq_restore(ctxt->flags);
78}
79
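Taken together, these three helpers bracket any range-register update: save state with interrupts off, disable the MTRRs/ARRs, write the registers, then restore. A minimal sketch of how a processor-specific set() routine can use them; example_set() and its placeholder body are illustrative, not part of the source:

static void example_set(unsigned int reg, unsigned long base,
			unsigned long size, mtrr_type type)
{
	struct set_mtrr_context ctxt;

	set_mtrr_prepare_save(&ctxt);	/* irqs off, caches flushed, state saved */
	set_mtrr_cache_disable(&ctxt);	/* MTRRs/ARRs switched off */

	/* ... program the vendor's range registers for 'reg' here ... */

	set_mtrr_done(&ctxt);		/* restore state, caches and irqs */
}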