author     Harvey Harrison <harvey.harrison@gmail.com>   2008-02-04 10:48:01 -0500
committer  Ingo Molnar <mingo@elte.hu>                   2008-02-04 10:48:01 -0500
commit     9a6b344ea967efa0bb5ca4cb5405f840652b66c4 (patch)
tree       60b06bf3ec6c27a942a0fa4b03d28b0e4ca353e9 /arch/x86
parent     e1adbcf10608c83de6a81a02ebce859611433b52 (diff)
x86: remove long dead cyrix mtrr code
cyrix_arr_init was #if 0 all the way back to at least v2.6.12. This was the
only place where arr3_protected was set to anything but zero. Eliminate this
variable.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
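[Editorial note, not part of the patch: a minimal sketch of why the variable is dead, using only identifiers from the pre-patch sources. Since cyrix_arr_init() sits inside "#if 0 ... #endif", no compiled code ever assigns arr3_protected, so it keeps its implicit zero initialization and every guard that tests it is unreachable.]

	/* Illustration only: the pre-patch state that makes the guards dead code. */
	int arr3_protected;		/* file scope, so implicitly 0 and never written */

	/* Consequently a check such as
	 *	if ((reg == 3) && arr3_protected)
	 * can never evaluate to true, and the patch removes both the variable
	 * and every branch guarded by it. */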
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c  107
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c    12
2 files changed, 0 insertions, 119 deletions
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 8e139c70f888..ff14c320040c 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -7,8 +7,6 @@
 #include <asm/processor-flags.h>
 #include "mtrr.h"
 
-int arr3_protected;
-
 static void
 cyrix_get_arr(unsigned int reg, unsigned long *base,
 	      unsigned long *size, mtrr_type * type)
@@ -99,8 +97,6 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 	case 4:
 		return replace_reg;
 	case 3:
-		if (arr3_protected)
-			break;
 	case 2:
 	case 1:
 	case 0:
@@ -115,8 +111,6 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 	} else {
 		for (i = 0; i < 7; i++) {
 			cyrix_get_arr(i, &lbase, &lsize, &ltype);
-			if ((i == 3) && arr3_protected)
-				continue;
 			if (lsize == 0)
 				return i;
 		}
@@ -260,107 +254,6 @@ static void cyrix_set_all(void)
 	post_set();
 }
 
-#if 0
-/*
- * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
- * with the SMM (System Management Mode) mode. So we need the following:
- * Check whether SMI_LOCK (CCR3 bit 0) is set
- * if it is set, write a warning message: ARR3 cannot be changed!
- * (it cannot be changed until the next processor reset)
- * if it is reset, then we can change it, set all the needed bits:
- * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
- * - disable access to SMM memory (CCR1 bit 2 reset)
- * - disable SMM mode (CCR1 bit 1 reset)
- * - disable write protection of ARR3 (CCR6 bit 1 reset)
- * - (maybe) disable ARR3
- * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
- */
-static void __init
-cyrix_arr_init(void)
-{
-	struct set_mtrr_context ctxt;
-	unsigned char ccr[7];
-	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
-#ifdef CONFIG_SMP
-	int i;
-#endif
-
-	/* flush cache and enable MAPEN */
-	set_mtrr_prepare_save(&ctxt);
-	set_mtrr_cache_disable(&ctxt);
-
-	/* Save all CCRs locally */
-	ccr[0] = getCx86(CX86_CCR0);
-	ccr[1] = getCx86(CX86_CCR1);
-	ccr[2] = getCx86(CX86_CCR2);
-	ccr[3] = ctxt.ccr3;
-	ccr[4] = getCx86(CX86_CCR4);
-	ccr[5] = getCx86(CX86_CCR5);
-	ccr[6] = getCx86(CX86_CCR6);
-
-	if (ccr[3] & 1) {
-		ccrc[3] = 1;
-		arr3_protected = 1;
-	} else {
-		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
-		 * access to SMM memory through ARR3 (bit 7).
-		 */
-		if (ccr[1] & 0x80) {
-			ccr[1] &= 0x7f;
-			ccrc[1] |= 0x80;
-		}
-		if (ccr[1] & 0x04) {
-			ccr[1] &= 0xfb;
-			ccrc[1] |= 0x04;
-		}
-		if (ccr[1] & 0x02) {
-			ccr[1] &= 0xfd;
-			ccrc[1] |= 0x02;
-		}
-		arr3_protected = 0;
-		if (ccr[6] & 0x02) {
-			ccr[6] &= 0xfd;
-			ccrc[6] = 1;	/* Disable write protection of ARR3 */
-			setCx86(CX86_CCR6, ccr[6]);
-		}
-		/* Disable ARR3. This is safe now that we disabled SMM. */
-		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
-	}
-	/* If we changed CCR1 in memory, change it in the processor, too. */
-	if (ccrc[1])
-		setCx86(CX86_CCR1, ccr[1]);
-
-	/* Enable ARR usage by the processor */
-	if (!(ccr[5] & 0x20)) {
-		ccr[5] |= 0x20;
-		ccrc[5] = 1;
-		setCx86(CX86_CCR5, ccr[5]);
-	}
-#ifdef CONFIG_SMP
-	for (i = 0; i < 7; i++)
-		ccr_state[i] = ccr[i];
-	for (i = 0; i < 8; i++)
-		cyrix_get_arr(i,
-			      &arr_state[i].base, &arr_state[i].size,
-			      &arr_state[i].type);
-#endif
-
-	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */
-
-	if (ccrc[5])
-		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
-	if (ccrc[3])
-		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
-/*
-	if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
-	if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
-	if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
-*/
-	if (ccrc[6])
-		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
-}
-#endif
-
 static struct mtrr_ops cyrix_mtrr_ops = {
 	.vendor            = X86_VENDOR_CYRIX,
-//	.init              = cyrix_arr_init,
+//	.init              = cyrix_arr_init,
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 715919582657..822d8f90c1eb 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -59,12 +59,6 @@ struct mtrr_ops * mtrr_if = NULL;
 static void set_mtrr(unsigned int reg, unsigned long base,
 		     unsigned long size, mtrr_type type);
 
-#ifndef CONFIG_X86_64
-extern int arr3_protected;
-#else
-#define arr3_protected 0
-#endif
-
 void set_mtrr_ops(struct mtrr_ops * ops)
 {
 	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
@@ -513,12 +507,6 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
 		goto out;
 	}
-	if (is_cpu(CYRIX) && !use_intel()) {
-		if ((reg == 3) && arr3_protected) {
-			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
-			goto out;
-		}
-	}
 	mtrr_if->get(reg, &lbase, &lsize, &ltype);
 	if (lsize < 1) {
 		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);