 arch/x86/Kconfig                   |  15 +
 arch/x86/kernel/cpu/mtrr/generic.c | 120 +
 arch/x86/mm/Makefile               |   3 +-
 arch/x86/mm/pageattr.c             |   4 +-
 arch/x86/mm/pat.c                  | 402 +
 include/asm-x86/cpufeature.h       |   1 +
 include/asm-x86/msr-index.h        |   2 +
 include/asm-x86/mtrr.h             |   2 +
 include/asm-x86/pat.h              |  16 +
 include/asm-x86/pgtable.h          |   6 +
 10 files changed, 568 insertions(+), 3 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fd27048087b8..5b46756e4c7c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1009,6 +1009,21 @@ config MTRR
 
 	  See <file:Documentation/mtrr.txt> for more information.
 
+config X86_PAT
+	def_bool y
+	prompt "x86 PAT support"
+	depends on MTRR && NONPROMISC_DEVMEM
+	help
+	  Use PAT attributes to setup page level cache control.
+
+	  PATs are the modern equivalents of MTRRs and are much more
+	  flexible than MTRRs.
+
+	  Say N here if you see bootup problems (boot crash, boot hang,
+	  spontaneous reboots) or a non-working Xorg.
+
+	  If unsure, say Y.
+
 config EFI
 	def_bool n
 	prompt "EFI runtime service support"
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 3e18db4cefee..011e07e99cd1 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -11,6 +11,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor-flags.h>
 #include <asm/tlbflush.h>
+#include <asm/pat.h>
 #include "mtrr.h"
 
 struct mtrr_state {
@@ -35,6 +36,7 @@ static struct fixed_range_block fixed_range_blocks[] = {
 
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
+static int mtrr_state_set;
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "mtrr."
@@ -42,6 +44,106 @@ static struct mtrr_state mtrr_state = {};
 static int mtrr_show;
 module_param_named(show, mtrr_show, bool, 0);
 
+/*
+ * Returns the effective MTRR type for the region.
+ * Error returns:
+ * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
+ * - 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	int i;
+	u64 base, mask;
+	u8 prev_match, curr_match;
+
+	if (!mtrr_state_set)
+		return 0xFF;
+
+	if (!mtrr_state.enabled)
+		return 0xFF;
+
+	/* Make end inclusive instead of exclusive */
+	end--;
+
+	/* Look in fixed ranges. Just return the type as per start */
+	if (mtrr_state.have_fixed && (start < 0x100000)) {
+		int idx;
+
+		if (start < 0x80000) {
+			idx = 0;
+			idx += (start >> 16);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0xC0000) {
+			idx = 1 * 8;
+			idx += ((start - 0x80000) >> 14);
+			return mtrr_state.fixed_ranges[idx];
+		} else if (start < 0x1000000) {
+			idx = 3 * 8;
+			idx += ((start - 0xC0000) >> 12);
+			return mtrr_state.fixed_ranges[idx];
+		}
+	}
+
+	/*
+	 * Look in variable ranges.
+	 * Look for multiple ranges matching this address and pick the type
+	 * as per MTRR precedence.
+	 */
+	if (!(mtrr_state.enabled & 2)) {
+		return mtrr_state.def_type;
+	}
+
+	prev_match = 0xFF;
+	for (i = 0; i < num_var_ranges; ++i) {
+		unsigned short start_state, end_state;
+
+		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
+			continue;
+
+		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
+		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
+		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
+		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
+
+		start_state = ((start & mask) == (base & mask));
+		end_state = ((end & mask) == (base & mask));
+		if (start_state != end_state)
+			return 0xFE;
+
+		if ((start & mask) != (base & mask)) {
+			continue;
+		}
+
+		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
+		if (prev_match == 0xFF) {
+			prev_match = curr_match;
+			continue;
+		}
+
+		if (prev_match == MTRR_TYPE_UNCACHABLE ||
+		    curr_match == MTRR_TYPE_UNCACHABLE) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+
+		if ((prev_match == MTRR_TYPE_WRBACK &&
+		     curr_match == MTRR_TYPE_WRTHROUGH) ||
+		    (prev_match == MTRR_TYPE_WRTHROUGH &&
+		     curr_match == MTRR_TYPE_WRBACK)) {
+			prev_match = MTRR_TYPE_WRTHROUGH;
+			curr_match = MTRR_TYPE_WRTHROUGH;
+		}
+
+		if (prev_match != curr_match) {
+			return MTRR_TYPE_UNCACHABLE;
+		}
+	}
+
+	if (prev_match != 0xFF)
+		return prev_match;
+
+	return mtrr_state.def_type;
+}
+
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -79,12 +181,16 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
 		base, base + step - 1, mtrr_attrib_to_str(*types));
 }
 
+static void prepare_set(void);
+static void post_set(void);
+
 /* Grab all of the MTRR state for this CPU into *state */
 void __init get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
 	unsigned lo, dummy;
+	unsigned long flags;
 
 	vrs = mtrr_state.var_ranges;
 
@@ -131,6 +237,17 @@ void __init get_mtrr_state(void)
 			printk(KERN_INFO "MTRR %u disabled\n", i);
 		}
 	}
+	mtrr_state_set = 1;
+
+	/* PAT setup for BP. We need to go through sync steps here */
+	local_irq_save(flags);
+	prepare_set();
+
+	pat_init();
+
+	post_set();
+	local_irq_restore(flags);
+
 }
 
 /* Some BIOS's are fucked and don't set all MTRRs the same! */
@@ -397,6 +514,9 @@ static void generic_set_all(void)
 	/* Actually set the state */
 	mask = set_mtrr_state();
 
+	/* also set PAT */
+	pat_init();
+
 	post_set();
 	local_irq_restore(flags);
 
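The return convention of mtrr_type_lookup() above is worth spelling out: 0xFF means no usable MTRR state (MTRRs disabled, or state not yet captured), 0xFE means the range straddles a variable-range MTRR boundary, and any other value is a genuine MTRR_TYPE_* constant. A minimal sketch of a caller honoring that convention (hypothetical helper, not part of this patch; the patch's real consumer is pat_x_mtrr_type() in pat.c below):

/* Hypothetical caller of mtrr_type_lookup(); illustrates the
 * 0xFF/0xFE error convention. Not part of this patch. */
static u8 effective_mtrr_type(u64 start, u64 end, u8 fallback)
{
	u8 type = mtrr_type_lookup(start, end);

	if (type == 0xFF)		/* MTRR state unavailable */
		return fallback;
	if (type == 0xFE)		/* range not uniformly covered */
		return MTRR_TYPE_UNCACHABLE;
	return type;			/* a genuine MTRR_TYPE_* value */
}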
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9ab9889863f0..20941d2954e2 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,4 +1,5 @@
-obj-y	:= init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o
+obj-y	:= init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
+	   pat.o
 
 obj-$(CONFIG_X86_32)	+= pgtable_32.o
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6cdfc0fd68be..f7d5ca170c22 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -773,14 +773,14 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages,
 int set_memory_uc(unsigned long addr, int numpages)
 {
 	return change_page_attr_set(addr, numpages,
-				    __pgprot(_PAGE_PCD));
+				    __pgprot(_PAGE_CACHE_UC));
 }
 EXPORT_SYMBOL(set_memory_uc);
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(addr, numpages,
-				      __pgprot(_PAGE_PCD | _PAGE_PWT));
+				      __pgprot(_PAGE_CACHE_MASK));
 }
 EXPORT_SYMBOL(set_memory_wb);
 
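With the _PAGE_CACHE_* definitions from pgtable.h (see the last hunk of this patch), set_memory_uc() now requests full UC (PCD|PWT) and set_memory_wb() clears the entire cache-attribute field, so it also undoes a WC mapping. A sketch of the usual driver pairing, assuming vaddr/npages describe a kernel mapping (illustrative only, not from this patch):

/* Illustrative pairing of set_memory_uc()/set_memory_wb();
 * 'vaddr' and 'npages' are assumed to describe a kernel mapping. */
static int with_uncached_buffer(unsigned long vaddr, int npages)
{
	int err = set_memory_uc(vaddr, npages);	/* sets PCD|PWT -> UC */

	if (err)
		return err;
	/* ... touch the buffer while it is uncached ... */
	return set_memory_wb(vaddr, npages);	/* clears _PAGE_CACHE_MASK */
}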
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
new file mode 100644
index 000000000000..7cc71d868483
--- /dev/null
+++ b/arch/x86/mm/pat.c
@@ -0,0 +1,402 @@
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *          Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
+ */
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+
+#include <asm/msr.h>
+#include <asm/tlbflush.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+#include <asm/e820.h>
+#include <asm/cacheflush.h>
+#include <asm/fcntl.h>
+#include <asm/mtrr.h>
+
+int pat_wc_enabled = 1;
+
+static u64 __read_mostly boot_pat_state;
+
+static int nopat(char *str)
+{
+	pat_wc_enabled = 0;
+	printk(KERN_INFO "x86: PAT support disabled.\n");
+
+	return 0;
+}
+early_param("nopat", nopat);
+
+static int pat_known_cpu(void)
+{
+	if (!pat_wc_enabled)
+		return 0;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    (boot_cpu_data.x86 == 0xF ||
+	     (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15))) {
+		if (cpu_has_pat) {
+			return 1;
+		}
+	}
+
+	pat_wc_enabled = 0;
+	printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
+	return 0;
+}
+
+enum {
+	PAT_UC = 0,		/* uncached */
+	PAT_WC = 1,		/* Write combining */
+	PAT_WT = 4,		/* Write Through */
+	PAT_WP = 5,		/* Write Protected */
+	PAT_WB = 6,		/* Write Back (default) */
+	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
+};
+
+#define PAT(x,y)	((u64)PAT_ ## y << ((x)*8))
+
+void pat_init(void)
+{
+	u64 pat;
+
+#ifndef CONFIG_X86_PAT
+	nopat(NULL);
+#endif
+
+	/* Boot CPU enables PAT based on CPU feature */
+	if (!smp_processor_id() && !pat_known_cpu())
+		return;
+
+	/* APs enable PAT iff boot CPU has enabled it before */
+	if (smp_processor_id() && !pat_wc_enabled)
+		return;
+
+	/* Set PWT to Write-Combining. All other bits stay the same */
+	/*
+	 * PTE encoding used in Linux:
+	 *      PAT
+	 *      |PCD
+	 *      ||PWT
+	 *      |||
+	 *      000 WB		_PAGE_CACHE_WB
+	 *      001 WC		_PAGE_CACHE_WC
+	 *      010 UC-		_PAGE_CACHE_UC_MINUS
+	 *      011 UC		_PAGE_CACHE_UC
+	 * PAT bit unused
+	 */
+	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
+	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
+
+	/* Boot CPU check */
+	if (!smp_processor_id()) {
+		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+	}
+
+	wrmsrl(MSR_IA32_CR_PAT, pat);
+	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
+	       smp_processor_id(), boot_pat_state, pat);
+}
+
+#undef PAT
+
+static char *cattr_name(unsigned long flags)
+{
+	switch (flags & _PAGE_CACHE_MASK) {
+	case _PAGE_CACHE_UC:		return "uncached";
+	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
+	case _PAGE_CACHE_WB:		return "write-back";
+	case _PAGE_CACHE_WC:		return "write-combining";
+	default:			return "broken";
+	}
+}
+
+/*
+ * The global memtype list keeps track of the memory type for specific
+ * physical memory areas. Conflicting memory types in different
+ * mappings can cause CPU cache corruption. To avoid this we keep track.
+ *
+ * The list is sorted based on starting address and can contain multiple
+ * entries for each address (this allows reference counting for overlapping
+ * areas). All the aliases have the same cache attributes of course.
+ * Zero attributes are represented as holes.
+ *
+ * Currently the data structure is a list because the number of mappings
+ * is expected to be relatively small. If this should become a problem
+ * it could be changed to an rbtree or similar.
+ *
+ * memtype_lock protects the whole list.
+ */
+
+struct memtype {
+	u64 start;
+	u64 end;
+	unsigned long type;
+	struct list_head nd;
+};
+
+static LIST_HEAD(memtype_list);
+static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
+
+/*
+ * Computes the intersection of the PAT memory type and the MTRR memory
+ * type and returns the resulting memory type as PAT understands it.
+ * (The type values in PAT and in MTRR do not share the same encoding.)
+ * The intersection is based on the "Effective Memory Type" tables in
+ * IA-32 SDM vol 3a.
+ */
+static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
+			   unsigned long *ret_prot)
+{
+	unsigned long pat_type;
+	u8 mtrr_type;
+
+	mtrr_type = mtrr_type_lookup(start, end);
+	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
+		*ret_prot = prot;
+		return 0;
+	}
+	if (mtrr_type == 0xFE) {		/* MTRR match error */
+		*ret_prot = _PAGE_CACHE_UC;
+		return -1;
+	}
+	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
+	    mtrr_type != MTRR_TYPE_WRBACK &&
+	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
+		*ret_prot = _PAGE_CACHE_UC;
+		return -1;
+	}
+
+	pat_type = prot & _PAGE_CACHE_MASK;
+	prot &= (~_PAGE_CACHE_MASK);
+
+	/* Currently doing intersection by hand. Optimize it later. */
+	if (pat_type == _PAGE_CACHE_WC) {
+		*ret_prot = prot | _PAGE_CACHE_WC;
+	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
+		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
+	} else if (pat_type == _PAGE_CACHE_UC ||
+		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
+		*ret_prot = prot | _PAGE_CACHE_WC;
+	} else {
+		*ret_prot = prot | _PAGE_CACHE_WB;
+	}
+
+	return 0;
+}
+
+int reserve_memtype(u64 start, u64 end, unsigned long req_type,
+		    unsigned long *ret_type)
+{
+	struct memtype *new_entry = NULL;
+	struct memtype *parse;
+	unsigned long actual_type;
+	int err = 0;
+
+	/* Only track when pat_wc_enabled */
+	if (!pat_wc_enabled) {
+		if (ret_type)
+			*ret_type = req_type;
+
+		return 0;
+	}
+
+	/* Low ISA region is always mapped WB in page table. No need to track */
+	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
+		if (ret_type)
+			*ret_type = _PAGE_CACHE_WB;
+
+		return 0;
+	}
+
+	req_type &= _PAGE_CACHE_MASK;
+	err = pat_x_mtrr_type(start, end, req_type, &actual_type);
+	if (err) {
+		if (ret_type)
+			*ret_type = actual_type;
+
+		return -EINVAL;
+	}
+
+	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+
+	new_entry->start = start;
+	new_entry->end = end;
+	new_entry->type = actual_type;
+
+	if (ret_type)
+		*ret_type = actual_type;
+
+	spin_lock(&memtype_lock);
+
+	/* Search for existing mapping that overlaps the current range */
+	list_for_each_entry(parse, &memtype_list, nd) {
+		struct memtype *saved_ptr;
+
+		if (parse->start >= end) {
+			list_add(&new_entry->nd, parse->nd.prev);
+			new_entry = NULL;
+			break;
+		}
+
+		if (start <= parse->start && end >= parse->start) {
+			if (actual_type != parse->type && ret_type) {
+				actual_type = parse->type;
+				*ret_type = actual_type;
+				new_entry->type = actual_type;
+			}
+
+			if (actual_type != parse->type) {
+				printk(KERN_INFO
+				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+					current->comm, current->pid,
+					start, end,
+					cattr_name(actual_type),
+					cattr_name(parse->type));
+				err = -EBUSY;
+				break;
+			}
+
+			saved_ptr = parse;
+			/*
+			 * Check to see whether the request overlaps more
+			 * than one entry in the list
+			 */
+			list_for_each_entry_continue(parse, &memtype_list, nd) {
+				if (end <= parse->start) {
+					break;
+				}
+
+				if (actual_type != parse->type) {
+					printk(KERN_INFO
+				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+						current->comm, current->pid,
+						start, end,
+						cattr_name(actual_type),
+						cattr_name(parse->type));
+					err = -EBUSY;
+					break;
+				}
+			}
+
+			if (err) {
+				break;
+			}
+
+			/* No conflict. Go ahead and add this new entry */
+			list_add(&new_entry->nd, saved_ptr->nd.prev);
+			new_entry = NULL;
+			break;
+		}
+
+		if (start < parse->end) {
+			if (actual_type != parse->type && ret_type) {
+				actual_type = parse->type;
+				*ret_type = actual_type;
+				new_entry->type = actual_type;
+			}
+
+			if (actual_type != parse->type) {
+				printk(KERN_INFO
+				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+					current->comm, current->pid,
+					start, end,
+					cattr_name(actual_type),
+					cattr_name(parse->type));
+				err = -EBUSY;
+				break;
+			}
+
+			saved_ptr = parse;
+			/*
+			 * Check to see whether the request overlaps more
+			 * than one entry in the list
+			 */
+			list_for_each_entry_continue(parse, &memtype_list, nd) {
+				if (end <= parse->start) {
+					break;
+				}
+
+				if (actual_type != parse->type) {
+					printk(KERN_INFO
+				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+						current->comm, current->pid,
+						start, end,
+						cattr_name(actual_type),
+						cattr_name(parse->type));
+					err = -EBUSY;
+					break;
+				}
+			}
+
+			if (err) {
+				break;
+			}
+
+			/* No conflict. Go ahead and add this new entry */
+			list_add(&new_entry->nd, &saved_ptr->nd);
+			new_entry = NULL;
+			break;
+		}
+	}
+
+	if (err) {
+		kfree(new_entry);
+		spin_unlock(&memtype_lock);
+		return err;
+	}
+
+	if (new_entry) {
+		/* No conflict. Not yet added to the list. Add to the tail */
+		list_add_tail(&new_entry->nd, &memtype_list);
+	}
+
+	spin_unlock(&memtype_lock);
+	return err;
+}
+
+int free_memtype(u64 start, u64 end)
+{
+	struct memtype *ml;
+	int err = -EINVAL;
+
+	/* Only track when pat_wc_enabled */
+	if (!pat_wc_enabled) {
+		return 0;
+	}
+
+	/* Low ISA region is always mapped WB. No need to track */
+	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
+		return 0;
+	}
+
+	spin_lock(&memtype_lock);
+	list_for_each_entry(ml, &memtype_list, nd) {
+		if (ml->start == start && ml->end == end) {
+			list_del(&ml->nd);
+			kfree(ml);
+			err = 0;
+			break;
+		}
+	}
+	spin_unlock(&memtype_lock);
+
+	if (err) {
+		printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n",
+			current->comm, current->pid, start, end);
+	}
+	return err;
+}
+
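The tracking API above is reserve-then-free: a caller asks for a type, and reserve_memtype() may hand back a different one in *ret_type when an overlapping entry already pinned the range to another attribute. A sketch of an ioremap-style caller under that contract (hypothetical helper and error policy, not part of this patch):

/* Hypothetical ioremap-style caller of reserve_memtype()/free_memtype();
 * illustrative only. */
static int map_device_window(u64 phys, u64 size)
{
	unsigned long want = _PAGE_CACHE_WC, got;
	int err;

	err = reserve_memtype(phys, phys + size, want, &got);
	if (err)			/* conflicting alias already exists */
		return err;
	if (got != want) {
		/* An existing overlapping mapping forced another type;
		 * this sketch backs out rather than using 'got'. */
		free_memtype(phys, phys + size);
		return -EBUSY;
	}
	/* ... set up the page tables with 'got' ... */
	return 0;
}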
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 90feb6f2562c..0d609c837a41 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -186,6 +186,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+#define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 3ed97144c07b..af4e07f661b8 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -57,6 +57,8 @@
 #define MSR_MTRRfix4K_F8000		0x0000026f
 #define MSR_MTRRdefType			0x000002ff
 
+#define MSR_IA32_CR_PAT			0x00000277
+
 #define MSR_IA32_DEBUGCTLMSR		0x000001d9
 #define MSR_IA32_LASTBRANCHFROMIP	0x000001db
 #define MSR_IA32_LASTBRANCHTOIP		0x000001dc
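MSR_IA32_CR_PAT packs eight one-byte page-attribute entries, PA0 in the lowest byte — the same layout the PAT(x,y) macro in pat.c builds up before the wrmsrl(). A trivial decode helper under that assumption (illustrative only, not part of the patch):

/* Extracts PAT entry 'slot' (0..7) from a raw IA32_PAT MSR value;
 * PA0 lives in the low byte. Illustrative helper. */
static inline u8 pat_msr_entry(u64 pat_msr, int slot)
{
	return (pat_msr >> (slot * 8)) & 0xFF;
}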
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index 319d065800be..968794af93f9 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -84,6 +84,8 @@ struct mtrr_gentry
 
 #ifdef __KERNEL__
 
+extern u8 mtrr_type_lookup(u64 addr, u64 end);
+
 /* The following functions are for use by other drivers */
 # ifdef CONFIG_MTRR
 extern void mtrr_save_fixed_ranges(void *);
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
new file mode 100644
index 000000000000..8b822b5a1786
--- /dev/null
+++ b/include/asm-x86/pat.h
@@ -0,0 +1,16 @@
+
+#ifndef _ASM_PAT_H
+#define _ASM_PAT_H 1
+
+#include <linux/types.h>
+
+extern int pat_wc_enabled;
+
+extern void pat_init(void);
+
+extern int reserve_memtype(u64 start, u64 end,
+		unsigned long req_type, unsigned long *ret_type);
+extern int free_memtype(u64 start, u64 end);
+
+#endif
+
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 9cf472aeb9ce..ca6deb3de7c0 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -57,6 +57,12 @@
 
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
+#define _PAGE_CACHE_WB		(0)
+#define _PAGE_CACHE_WC		(_PAGE_PWT)
+#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
+#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
+
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
 
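Because pat_init() programs slots 0-3 as WB, WC, UC-, UC and mirrors them in slots 4-7, the PWT/PCD bits of a PTE directly form the PAT slot index — which is why the _PAGE_CACHE_* values above double as slot numbers. A sketch of that correspondence (hypothetical helper, assuming the table set up in pat.c; not part of the patch):

/* Maps a PTE's cache-attribute bits to its PAT slot index, under the
 * PAT table programmed by pat_init(). Illustrative, not in the patch. */
static inline int pte_pat_slot(pte_t pte)
{
	unsigned long cache = pte_val(pte) & _PAGE_CACHE_MASK;

	/* slot = PCD*2 + PWT; the PAT bit itself is left unused here */
	return ((cache & _PAGE_PCD) ? 2 : 0) | ((cache & _PAGE_PWT) ? 1 : 0);
}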