Diffstat (limited to 'arch/x86/kernel/cpu/mtrr/cyrix.c')
-rw-r--r--	arch/x86/kernel/cpu/mtrr/cyrix.c	380
1 file changed, 380 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
new file mode 100644
index 000000000000..2287d4863a8a
--- /dev/null
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -0,0 +1,380 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>
#include "mtrr.h"

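/*
 * Set while ARR3 is locked down by SMM (SMI_LOCK in CCR3); a protected
 * ARR3 must never be handed out as a free region or rewritten. See the
 * disabled cyrix_arr_init() below for where this would be detected.
 */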
int arr3_protected;

static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type *type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
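	/*
	 * Each ARR spans three consecutive config registers: the 24
	 * significant base-address bits are stored highest byte first,
	 * and the low nibble of the third byte doubles as the size
	 * encoding (extracted into 'shift' below, then dropped from the
	 * base by the PAGE_SHIFT shift).
	 */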
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if they were enabled previously */
	local_irq_restore(flags);
	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/*
	 * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7.
	 * Note: shift == 0xf means 4G, this is unsupported.
	 */
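	/*
	 * Worked example: ARR0 with shift == 7 decodes to 0x1UL << 6 =
	 * 64 pages = 256K; ARR7 with shift == 1 decodes to 0x40 pages,
	 * also 256K, because ARR7 counts in 256K units.
	 */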
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}

static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free ARR.
   <base> The starting (base) address of the region, in 4K pages.
   <size> The size of the region, in 4K pages.
   <replace_reg> Index of an ARR the caller wants to reuse, or -1.
   [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	switch (replace_reg) {
	case 7:
		if (size < 0x40)
			break;
		/* fall through: ARR7 is usable for regions of 256K or more */
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
		if (arr3_protected)
			break;
		/* fall through */
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}
	/* If we are to set up a region >32M (0x2000 pages) then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/* Else try ARR0-ARR6 first */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/*
		 * None of ARR0-ARR6 is free, so try ARR7, whose size
		 * must be at least 256K (0x40 pages).
		 */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}

static u32 cr4 = 0;
static u32 ccr3;

static void prepare_set(void)
{
	u32 cr0;

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/*
	 * Disable and flush caches. Note that wbinvd flushes the TLBs
	 * as a side-effect.
	 */
	cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Save CCR3 so post_set() can restore it */
	ccr3 = getCx86(CX86_CCR3);

	/* Enable MAPEN so the ARR and RCR config registers are accessible */
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}

static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();

	/* Restore CCR3, which disables MAPEN again */
	setCx86(CX86_CCR3, ccr3);

	/* Enable caches (clear CR0.CD, bit 30) */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
}
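/*
 * prepare_set()/post_set() bracket every ARR update below: caches are
 * disabled and MAPEN raised on entry, the ARR/RCR registers are written
 * in between, and the previous CCR3/CR0/CR4 state is restored on exit.
 */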

static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/*
	 * ARR7's size field counts in 256K units (max 2G); ARR0-ARR6
	 * count in 4K units (max 32M), so scale ARR7 down by 64.
	 */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;	/* make sure arr_size <= 14 */
	for (arr_size = 0; size; arr_size++, size >>= 1)
		;
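	/*
	 * Worked example: a 256K region on ARR0-ARR6 arrives as
	 * size == 0x40 pages; the loop above leaves arr_size == 7,
	 * which cyrix_get_arr() decodes back to 0x1UL << 6 pages.
	 */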

	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	/* base arrives in 4K pages; the hardware wants a byte address */
	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}

typedef struct {
	unsigned long base;
	unsigned long size;
	mtrr_type type;
} arr_state_t;

static arr_state_t arr_state[8] = {
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
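/*
 * Shadow copies of the CCRs and ARRs, replayed on each CPU by
 * cyrix_set_all(); the disabled cyrix_arr_init() below would populate
 * them from the boot CPU when CONFIG_SMP is set.
 */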

static void cyrix_set_all(void)
{
	int i;

	prepare_set();

	/*
	 * The CCRs are not contiguous: CCR0-CCR3 and CCR4-CCR6 form two
	 * separate banks, so rebase the index for the second loop.
	 */
	for (i = 0; i < 4; i++)
		setCx86(CX86_CCR0 + i, ccr_state[i]);
	for (; i < 7; i++)
		setCx86(CX86_CCR4 + i - 4, ccr_state[i]);
	for (i = 0; i < 8; i++)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);

	post_set();
}

#if 0
/*
 * On Cyrix 6x86(MX) and M II, ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set:
 * if it is set, warn that ARR3 cannot be changed
 * (it cannot be changed until the next processor reset);
 * if it is clear, we can change ARR3, so set all the needed bits:
 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 * - disable access to SMM memory (CCR1 bit 2 reset)
 * - disable SMM mode (CCR1 bit 1 reset)
 * - disable write protection of ARR3 (CCR6 bit 1 reset)
 * - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set).
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	if (ccr[3] & 1) {
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/*
		 * Disable SMM mode (bit 1), access to SMM memory (bit 2)
		 * and access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}
	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}
#ifdef CONFIG_SMP
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
	if (ccrc[1] & 0x80) printk("mtrr: SMM memory access through ARR3 disabled\n");
	if (ccrc[1] & 0x04) printk("mtrr: SMM memory access disabled\n");
	if (ccrc[1] & 0x02) printk("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif

static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor = X86_VENDOR_CYRIX,
//	.init = cyrix_arr_init,
	.set_all = cyrix_set_all,
	.set = cyrix_set_arr,
	.get = cyrix_get_arr,
	.get_free_region = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb = positive_have_wrcomb,
};
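/*
 * cyrix_init_mtrr() only registers these ops with the MTRR core; the
 * .init hook and the arch_initcall() registration at the bottom are
 * left commented out, so the core presumably invokes cyrix_init_mtrr()
 * itself when it detects a Cyrix CPU (not shown in this file).
 */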

int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}

//arch_initcall(cyrix_init_mtrr);