about summary refs log tree commit diff stats
path: root/arch/i386/kernel/cpu/mtrr/cyrix.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/i386/kernel/cpu/mtrr/cyrix.c')
-rw-r--r--  arch/i386/kernel/cpu/mtrr/cyrix.c  364
1 file changed, 364 insertions, 0 deletions
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
new file mode 100644
index 000000000000..933b0dd62f48
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -0,0 +1,364 @@
1#include <linux/init.h>
2#include <linux/mm.h>
3#include <asm/mtrr.h>
4#include <asm/msr.h>
5#include <asm/io.h>
6#include "mtrr.h"
7
8int arr3_protected;
9
10static void
11cyrix_get_arr(unsigned int reg, unsigned long *base,
12 unsigned int *size, mtrr_type * type)
13{
14 unsigned long flags;
15 unsigned char arr, ccr3, rcr, shift;
16
17 arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
18
19 /* Save flags and disable interrupts */
20 local_irq_save(flags);
21
22 ccr3 = getCx86(CX86_CCR3);
23 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
24 ((unsigned char *) base)[3] = getCx86(arr);
25 ((unsigned char *) base)[2] = getCx86(arr + 1);
26 ((unsigned char *) base)[1] = getCx86(arr + 2);
27 rcr = getCx86(CX86_RCR_BASE + reg);
28 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
29
30 /* Enable interrupts if it was enabled previously */
31 local_irq_restore(flags);
32 shift = ((unsigned char *) base)[1] & 0x0f;
33 *base >>= PAGE_SHIFT;
34
35 /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
36 * Note: shift==0xf means 4G, this is unsupported.
37 */
38 if (shift)
39 *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
40 else
41 *size = 0;
42
43 /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
44 if (reg < 7) {
45 switch (rcr) {
46 case 1:
47 *type = MTRR_TYPE_UNCACHABLE;
48 break;
49 case 8:
50 *type = MTRR_TYPE_WRBACK;
51 break;
52 case 9:
53 *type = MTRR_TYPE_WRCOMB;
54 break;
55 case 24:
56 default:
57 *type = MTRR_TYPE_WRTHROUGH;
58 break;
59 }
60 } else {
61 switch (rcr) {
62 case 0:
63 *type = MTRR_TYPE_UNCACHABLE;
64 break;
65 case 8:
66 *type = MTRR_TYPE_WRCOMB;
67 break;
68 case 9:
69 *type = MTRR_TYPE_WRBACK;
70 break;
71 case 25:
72 default:
73 *type = MTRR_TYPE_WRTHROUGH;
74 break;
75 }
76 }
77}
78
79static int
80cyrix_get_free_region(unsigned long base, unsigned long size)
81/* [SUMMARY] Get a free ARR.
82 <base> The starting (base) address of the region.
83 <size> The size (in bytes) of the region.
84 [RETURNS] The index of the region on success, else -1 on error.
85*/
86{
87 int i;
88 mtrr_type ltype;
89 unsigned long lbase;
90 unsigned int lsize;
91
92 /* If we are to set up a region >32M then look at ARR7 immediately */
93 if (size > 0x2000) {
94 cyrix_get_arr(7, &lbase, &lsize, &ltype);
95 if (lsize == 0)
96 return 7;
97 /* Else try ARR0-ARR6 first */
98 } else {
99 for (i = 0; i < 7; i++) {
100 cyrix_get_arr(i, &lbase, &lsize, &ltype);
101 if ((i == 3) && arr3_protected)
102 continue;
103 if (lsize == 0)
104 return i;
105 }
106 /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
107 cyrix_get_arr(i, &lbase, &lsize, &ltype);
108 if ((lsize == 0) && (size >= 0x40))
109 return i;
110 }
111 return -ENOSPC;
112}
113
114static u32 cr4 = 0;
115static u32 ccr3;
116
117static void prepare_set(void)
118{
119 u32 cr0;
120
121 /* Save value of CR4 and clear Page Global Enable (bit 7) */
122 if ( cpu_has_pge ) {
123 cr4 = read_cr4();
124 write_cr4(cr4 & (unsigned char) ~(1 << 7));
125 }
126
127 /* Disable and flush caches. Note that wbinvd flushes the TLBs as
128 a side-effect */
129 cr0 = read_cr0() | 0x40000000;
130 wbinvd();
131 write_cr0(cr0);
132 wbinvd();
133
134 /* Cyrix ARRs - everything else were excluded at the top */
135 ccr3 = getCx86(CX86_CCR3);
136
137 /* Cyrix ARRs - everything else were excluded at the top */
138 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
139
140}
141
142static void post_set(void)
143{
144 /* Flush caches and TLBs */
145 wbinvd();
146
147 /* Cyrix ARRs - everything else was excluded at the top */
148 setCx86(CX86_CCR3, ccr3);
149
150 /* Enable caches */
151 write_cr0(read_cr0() & 0xbfffffff);
152
153 /* Restore value of CR4 */
154 if ( cpu_has_pge )
155 write_cr4(cr4);
156}
157
158static void cyrix_set_arr(unsigned int reg, unsigned long base,
159 unsigned long size, mtrr_type type)
160{
161 unsigned char arr, arr_type, arr_size;
162
163 arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
164
165 /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
166 if (reg >= 7)
167 size >>= 6;
168
169 size &= 0x7fff; /* make sure arr_size <= 14 */
170 for (arr_size = 0; size; arr_size++, size >>= 1) ;
171
172 if (reg < 7) {
173 switch (type) {
174 case MTRR_TYPE_UNCACHABLE:
175 arr_type = 1;
176 break;
177 case MTRR_TYPE_WRCOMB:
178 arr_type = 9;
179 break;
180 case MTRR_TYPE_WRTHROUGH:
181 arr_type = 24;
182 break;
183 default:
184 arr_type = 8;
185 break;
186 }
187 } else {
188 switch (type) {
189 case MTRR_TYPE_UNCACHABLE:
190 arr_type = 0;
191 break;
192 case MTRR_TYPE_WRCOMB:
193 arr_type = 8;
194 break;
195 case MTRR_TYPE_WRTHROUGH:
196 arr_type = 25;
197 break;
198 default:
199 arr_type = 9;
200 break;
201 }
202 }
203
204 prepare_set();
205
206 base <<= PAGE_SHIFT;
207 setCx86(arr, ((unsigned char *) &base)[3]);
208 setCx86(arr + 1, ((unsigned char *) &base)[2]);
209 setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
210 setCx86(CX86_RCR_BASE + reg, arr_type);
211
212 post_set();
213}
214
215typedef struct {
216 unsigned long base;
217 unsigned int size;
218 mtrr_type type;
219} arr_state_t;
220
221static arr_state_t arr_state[8] __initdata = {
222 {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
223 {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
224};
225
226static unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
227
228static void cyrix_set_all(void)
229{
230 int i;
231
232 prepare_set();
233
234 /* the CCRs are not contiguous */
235 for (i = 0; i < 4; i++)
236 setCx86(CX86_CCR0 + i, ccr_state[i]);
237 for (; i < 7; i++)
238 setCx86(CX86_CCR4 + i, ccr_state[i]);
239 for (i = 0; i < 8; i++)
240 cyrix_set_arr(i, arr_state[i].base,
241 arr_state[i].size, arr_state[i].type);
242
243 post_set();
244}
245
246#if 0
247/*
248 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
249 * with the SMM (System Management Mode) mode. So we need the following:
250 * Check whether SMI_LOCK (CCR3 bit 0) is set
251 * if it is set, write a warning message: ARR3 cannot be changed!
252 * (it cannot be changed until the next processor reset)
253 * if it is reset, then we can change it, set all the needed bits:
254 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
255 * - disable access to SMM memory (CCR1 bit 2 reset)
256 * - disable SMM mode (CCR1 bit 1 reset)
257 * - disable write protection of ARR3 (CCR6 bit 1 reset)
258 * - (maybe) disable ARR3
259 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
260 */
261static void __init
262cyrix_arr_init(void)
263{
264 struct set_mtrr_context ctxt;
265 unsigned char ccr[7];
266 int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
267#ifdef CONFIG_SMP
268 int i;
269#endif
270
271 /* flush cache and enable MAPEN */
272 set_mtrr_prepare_save(&ctxt);
273 set_mtrr_cache_disable(&ctxt);
274
275 /* Save all CCRs locally */
276 ccr[0] = getCx86(CX86_CCR0);
277 ccr[1] = getCx86(CX86_CCR1);
278 ccr[2] = getCx86(CX86_CCR2);
279 ccr[3] = ctxt.ccr3;
280 ccr[4] = getCx86(CX86_CCR4);
281 ccr[5] = getCx86(CX86_CCR5);
282 ccr[6] = getCx86(CX86_CCR6);
283
284 if (ccr[3] & 1) {
285 ccrc[3] = 1;
286 arr3_protected = 1;
287 } else {
288 /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
289 * access to SMM memory through ARR3 (bit 7).
290 */
291 if (ccr[1] & 0x80) {
292 ccr[1] &= 0x7f;
293 ccrc[1] |= 0x80;
294 }
295 if (ccr[1] & 0x04) {
296 ccr[1] &= 0xfb;
297 ccrc[1] |= 0x04;
298 }
299 if (ccr[1] & 0x02) {
300 ccr[1] &= 0xfd;
301 ccrc[1] |= 0x02;
302 }
303 arr3_protected = 0;
304 if (ccr[6] & 0x02) {
305 ccr[6] &= 0xfd;
306 ccrc[6] = 1; /* Disable write protection of ARR3 */
307 setCx86(CX86_CCR6, ccr[6]);
308 }
309 /* Disable ARR3. This is safe now that we disabled SMM. */
310 /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
311 }
312 /* If we changed CCR1 in memory, change it in the processor, too. */
313 if (ccrc[1])
314 setCx86(CX86_CCR1, ccr[1]);
315
316 /* Enable ARR usage by the processor */
317 if (!(ccr[5] & 0x20)) {
318 ccr[5] |= 0x20;
319 ccrc[5] = 1;
320 setCx86(CX86_CCR5, ccr[5]);
321 }
322#ifdef CONFIG_SMP
323 for (i = 0; i < 7; i++)
324 ccr_state[i] = ccr[i];
325 for (i = 0; i < 8; i++)
326 cyrix_get_arr(i,
327 &arr_state[i].base, &arr_state[i].size,
328 &arr_state[i].type);
329#endif
330
331 set_mtrr_done(&ctxt); /* flush cache and disable MAPEN */
332
333 if (ccrc[5])
334 printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
335 if (ccrc[3])
336 printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
337/*
338 if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
339 if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
340 if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
341*/
342 if (ccrc[6])
343 printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
344}
345#endif
346
347static struct mtrr_ops cyrix_mtrr_ops = {
348 .vendor = X86_VENDOR_CYRIX,
349// .init = cyrix_arr_init,
350 .set_all = cyrix_set_all,
351 .set = cyrix_set_arr,
352 .get = cyrix_get_arr,
353 .get_free_region = cyrix_get_free_region,
354 .validate_add_page = generic_validate_add_page,
355 .have_wrcomb = positive_have_wrcomb,
356};
357
358int __init cyrix_init_mtrr(void)
359{
360 set_mtrr_ops(&cyrix_mtrr_ops);
361 return 0;
362}
363
364//arch_initcall(cyrix_init_mtrr);