author     venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>  2008-03-18 20:00:14 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-04-17 11:41:19 -0400
commit     2e5d9c857d4e6c9e7b7d8c8c86a68a7842d213d6 (patch)
tree       6c90c0f9f38ff85e2f42ddc0f4ef0291cdd47d38 /arch/x86/kernel/cpu
parent     d27554d874c7eeb14c8bfecdc39c3a8618cd8d32 (diff)
x86: PAT infrastructure patch
Sets up the pat_init() infrastructure.

The PAT MSR has the following setting:

	PAT
	|PCD
	||PWT
	|||
	000 WB		_PAGE_CACHE_WB
	001 WC		_PAGE_CACHE_WC
	010 UC-		_PAGE_CACHE_UC_MINUS
	011 UC		_PAGE_CACHE_UC

We are effectively changing WT from the boot-time setting to WC.
UC_MINUS is used to provide backward compatibility to existing /dev/mem
users (X).

reserve_memtype and free_memtype are new interfaces for maintaining
alias-free mappings. They are currently implemented in a simple way with a
linked list and are not optimized. reserve and free track the effective
memory type, as a result of the PAT and MTRR settings, rather than what is
actually requested in PAT.

pat_init piggybacks on mtrr_init, as the rules for setting up PAT and MTRR
are the same.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
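[Editorial illustration] A minimal, self-contained sketch of how the IA32_PAT value implied by the table above could be composed. The per-entry hardware encodings (UC=0, WC=1, WB=6, UC-=7) come from the Intel SDM; mirroring entries 4-7 from entries 0-3 is an assumption, since pat_init() itself lives outside the files shown in this view.

	/* Illustration only -- not part of the patch. */
	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	/* Hardware memory-type encodings used in each 8-bit PAT entry (Intel SDM). */
	enum pat_hw_type { HW_UC = 0, HW_WC = 1, HW_WB = 6, HW_UC_MINUS = 7 };

	/* Place a memory type into PAT entry 'idx', where idx = PAT<<2 | PCD<<1 | PWT. */
	#define PAT(idx, type) ((uint64_t)(type) << ((idx) * 8))

	int main(void)
	{
		uint64_t pat_msr =
			PAT(0, HW_WB)       |	/* 000 -> WB,  _PAGE_CACHE_WB       */
			PAT(1, HW_WC)       |	/* 001 -> WC,  _PAGE_CACHE_WC       */
			PAT(2, HW_UC_MINUS) |	/* 010 -> UC-, _PAGE_CACHE_UC_MINUS */
			PAT(3, HW_UC)       |	/* 011 -> UC,  _PAGE_CACHE_UC       */
			PAT(4, HW_WB)       |	/* entries 4-7 assumed to mirror 0-3 */
			PAT(5, HW_WC)       |
			PAT(6, HW_UC_MINUS) |
			PAT(7, HW_UC);

		printf("IA32_PAT = 0x%016" PRIx64 "\n", pat_msr);
		return 0;
	}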
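[Editorial illustration] The reserve_memtype()/free_memtype() implementation is outside the files shown in this view, so the sketch below only illustrates the usage pattern the message describes. The prototypes, the example_map_device() helper, and the downgrade behaviour are assumptions, not taken from this diff.

	/* Illustration only -- assumed prototypes of the new tracking interface. */
	int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			    unsigned long *ret_type);
	int free_memtype(u64 start, u64 end);

	/* Hypothetical caller: reserve a physical range before mapping it. */
	static int example_map_device(u64 phys, u64 size)
	{
		unsigned long want = _PAGE_CACHE_UC_MINUS;	/* legacy /dev/mem style */
		unsigned long got;
		int ret;

		/* Track the range; 'got' is the effective type after PAT and MTRR. */
		ret = reserve_memtype(phys, phys + size, want, &got);
		if (ret)
			return ret;

		/* ... map the range with 'got' and use it ... */

		/* Release the reservation when the mapping is torn down. */
		return free_memtype(phys, phys + size);
	}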
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 120
1 file changed, 120 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 3e18db4cefee..011e07e99cd1 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -11,6 +11,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor-flags.h>
 #include <asm/tlbflush.h>
+#include <asm/pat.h>
 #include "mtrr.h"
 
 struct mtrr_state {
@@ -35,6 +36,7 @@ static struct fixed_range_block fixed_range_blocks[] = {
 
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
+static int mtrr_state_set;
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "mtrr."
@@ -42,6 +44,106 @@ static struct mtrr_state mtrr_state = {};
 static int mtrr_show;
 module_param_named(show, mtrr_show, bool, 0);
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error returns:
+ * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
+ * - 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	int i;
+	u64 base, mask;
+	u8 prev_match, curr_match;
+
+	if (!mtrr_state_set)
+		return 0xFF;
+
+	if (!mtrr_state.enabled)
+		return 0xFF;
+
+	/* Make end inclusive, instead of exclusive */
+	end--;
+
+	/* Look in fixed ranges. Just return the type as per start */
+	if (mtrr_state.have_fixed && (start < 0x100000)) {
+		int idx;
+
+		if (start < 0x80000) {
+			idx = 0;
+			idx += (start >> 16);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0xC0000) {
+			idx = 1 * 8;
+			idx += ((start - 0x80000) >> 14);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0x1000000) {
+			idx = 3 * 8;
+			idx += ((start - 0xC0000) >> 12);
+			return mtrr_state.fixed_ranges[idx];
+		}
+	}
+
+	/*
+	 * Look in variable ranges
+	 * Look for multiple ranges matching this address and pick the type
+	 * as per MTRR precedence
+	 */
+	if (!(mtrr_state.enabled & 2)) {
+		return mtrr_state.def_type;
+	}
+
+	prev_match = 0xFF;
+	for (i = 0; i < num_var_ranges; ++i) {
+		unsigned short start_state, end_state;
+
+		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
+			continue;
+
+		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
+		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
+		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
+		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
+
+		start_state = ((start & mask) == (base & mask));
+		end_state = ((end & mask) == (base & mask));
+		if (start_state != end_state)
+			return 0xFE;
+
+		if ((start & mask) != (base & mask)) {
+			continue;
+		}
+
+		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
+		if (prev_match == 0xFF) {
+			prev_match = curr_match;
+			continue;
+		}
+
+		if (prev_match == MTRR_TYPE_UNCACHABLE ||
+		    curr_match == MTRR_TYPE_UNCACHABLE) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+
+		if ((prev_match == MTRR_TYPE_WRBACK &&
+		     curr_match == MTRR_TYPE_WRTHROUGH) ||
+		    (prev_match == MTRR_TYPE_WRTHROUGH &&
+		     curr_match == MTRR_TYPE_WRBACK)) {
+			prev_match = MTRR_TYPE_WRTHROUGH;
+			curr_match = MTRR_TYPE_WRTHROUGH;
+		}
+
+		if (prev_match != curr_match) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+	}
+
+	if (prev_match != 0xFF)
+		return prev_match;
+
+	return mtrr_state.def_type;
+}
+
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -79,12 +181,16 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
 		   base, base + step - 1, mtrr_attrib_to_str(*types));
 }
 
+static void prepare_set(void);
+static void post_set(void);
+
 /* Grab all of the MTRR state for this CPU into *state */
 void __init get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
 	unsigned lo, dummy;
+	unsigned long flags;
 
 	vrs = mtrr_state.var_ranges;
 
@@ -131,6 +237,17 @@ void __init get_mtrr_state(void)
 			printk(KERN_INFO "MTRR %u disabled\n", i);
 		}
 	}
+	mtrr_state_set = 1;
+
+	/* PAT setup for BP. We need to go through sync steps here */
+	local_irq_save(flags);
+	prepare_set();
+
+	pat_init();
+
+	post_set();
+	local_irq_restore(flags);
+
 }
 
 /* Some BIOS's are fucked and don't set all MTRRs the same! */
@@ -397,6 +514,9 @@ static void generic_set_all(void)
 	/* Actually set the state */
 	mask = set_mtrr_state();
 
+	/* also set PAT */
+	pat_init();
+
 	post_set();
 	local_irq_restore(flags);
 
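[Editorial illustration] As context for the new mtrr_type_lookup() above, a hedged sketch of how a caller might fold its result into a requested PAT type to arrive at the "effective" memory type the commit message mentions. The effective_type() helper and the exact policy (falling back to UC- on 0xFF/0xFE, honouring a WC MTRR under a WB request) are assumptions, not code from this patch.

	/* Illustration only -- one plausible caller-side policy, not the real one. */
	static unsigned long effective_type(u64 start, u64 end, unsigned long req_type)
	{
		u8 mtrr_type = mtrr_type_lookup(start, end);

		/* MTRRs disabled (0xFF) or range not uniformly covered (0xFE): be safe. */
		if (mtrr_type == 0xFF || mtrr_type == 0xFE)
			return _PAGE_CACHE_UC_MINUS;

		/* A WB request over a write-combining MTRR is effectively WC. */
		if (req_type == _PAGE_CACHE_WB && mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;

		return req_type;
	}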