Diffstat (limited to 'arch/x86/kernel/cpu/mtrr')
-rw-r--r--   arch/x86/kernel/cpu/mtrr/cleanup.c |   2
-rw-r--r--   arch/x86/kernel/cpu/mtrr/generic.c | 130
-rw-r--r--   arch/x86/kernel/cpu/mtrr/main.c    |  40
3 files changed, 137 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index c5f59d071425..ac140c7be396 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return 0;
-	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+	if (boot_cpu_data.x86 < 0xf)
 		return 0;
 	/* In case some hypervisor doesn't pass SYSCFG through: */
 	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
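The context lines above show the rdmsr_safe() idiom for probing an MSR that a hypervisor may refuse to emulate. A minimal sketch of that idiom; the helper name is hypothetical, and the MtrrFixDramEn bit position follows the AMD manuals:

#include <asm/msr.h>
#include <linux/types.h>

/* Hypothetical helper: probe MSR_K8_SYSCFG without faulting when a
 * hypervisor does not pass the MSR through. rdmsr_safe() returns a
 * negative value if the RDMSR took a #GP. */
static bool syscfg_fix_dram_en(void)
{
        u32 l, h;

        if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
                return false;           /* MSR unavailable: assume disabled */

        return !!(l & (1 << 18));       /* SYSCFG[18] == MtrrFixDramEn */
}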
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 7d28d7d03885..a71efcdbb092 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -1,6 +1,6 @@
 /*
  * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
- * because MTRRs can span upto 40 bits (36bits on most modern x86)
+ * because MTRRs can span up to 40 bits (36bits on most modern x86)
  */
 #define DEBUG
 
@@ -64,18 +64,59 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	}
 }
 
+/* Get the size of contiguous MTRR range */
+static u64 get_mtrr_size(u64 mask)
+{
+	u64 size;
+
+	mask >>= PAGE_SHIFT;
+	mask |= size_or_mask;
+	size = -mask;
+	size <<= PAGE_SHIFT;
+	return size;
+}
+
 /*
- * Returns the effective MTRR type for the region
- * Error returns:
- * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
- * - 0xFF - when MTRR is not enabled
+ * Check and return the effective type for MTRR-MTRR type overlap.
+ * Returns 1 if the effective type is UNCACHEABLE, else returns 0
  */
-u8 mtrr_type_lookup(u64 start, u64 end)
+static int check_type_overlap(u8 *prev, u8 *curr)
+{
+	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
+		*prev = MTRR_TYPE_UNCACHABLE;
+		*curr = MTRR_TYPE_UNCACHABLE;
+		return 1;
+	}
+
+	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
+	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
+		*prev = MTRR_TYPE_WRTHROUGH;
+		*curr = MTRR_TYPE_WRTHROUGH;
+	}
+
+	if (*prev != *curr) {
+		*prev = MTRR_TYPE_UNCACHABLE;
+		*curr = MTRR_TYPE_UNCACHABLE;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Error/Semi-error returns:
+ * 0xFF - when MTRR is not enabled
+ * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
+ *		corresponds only to [start:*partial_end].
+ *		Caller has to lookup again for [*partial_end:end].
+ */
+static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 {
 	int i;
 	u64 base, mask;
 	u8 prev_match, curr_match;
 
+	*repeat = 0;
 	if (!mtrr_state_set)
 		return 0xFF;
 
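get_mtrr_size() recovers a range's size from its PHYSMASK value: OR-ing in size_or_mask fills every bit above the physical address width, so negating the result isolates exactly the low run of zero bits, i.e. the size in pages. A standalone worked example, assuming a 36-bit physical address space (which fixes size_or_mask; the kernel computes it the same way at boot):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        /* Assumption: 36-bit physical address space. */
        uint64_t size_or_mask = ~((1ULL << (36 - PAGE_SHIFT)) - 1);
        /* PHYSMASK of a 256MB variable MTRR: bits 35..28 set. */
        uint64_t mask = 0xff0000000ULL;
        uint64_t size;

        mask >>= PAGE_SHIFT;    /* 0x0000ff0000 */
        mask |= size_or_mask;   /* force the bits above the address width */
        size = -mask;           /* two's complement isolates the zero run */
        size <<= PAGE_SHIFT;

        printf("0x%llx\n", (unsigned long long)size);   /* prints 0x10000000 */
        return 0;
}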
@@ -126,8 +167,34 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 
 		start_state = ((start & mask) == (base & mask));
 		end_state = ((end & mask) == (base & mask));
-		if (start_state != end_state)
-			return 0xFE;
+
+		if (start_state != end_state) {
+			/*
+			 * We have start:end spanning across an MTRR.
+			 * We split the region into
+			 * either
+			 * (start:mtrr_end) (mtrr_end:end)
+			 * or
+			 * (start:mtrr_start) (mtrr_start:end)
+			 * depending on kind of overlap.
+			 * Return the type for first region and a pointer to
+			 * the start of second region so that caller will
+			 * lookup again on the second region.
+			 * Note: This way we handle multiple overlaps as well.
+			 */
+			if (start_state)
+				*partial_end = base + get_mtrr_size(mask);
+			else
+				*partial_end = base;
+
+			if (unlikely(*partial_end <= start)) {
+				WARN_ON(1);
+				*partial_end = start + PAGE_SIZE;
+			}
+
+			end = *partial_end - 1; /* end is inclusive */
+			*repeat = 1;
+		}
 
 		if ((start & mask) != (base & mask))
 			continue;
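Concretely, with a hypothetical variable MTRR mapping [0xc0000000, 0xe0000000) and a lookup for [0xb0000000, 0xd0000000): start falls outside the MTRR and end inside it, so start_state != end_state. Since start_state is false, *partial_end is set to base (0xc0000000), the first pass resolves only [0xb0000000, 0xc0000000), and *repeat = 1 tells the caller to look up [0xc0000000, 0xd0000000) next.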
@@ -138,21 +205,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 			continue;
 		}
 
-		if (prev_match == MTRR_TYPE_UNCACHABLE ||
-		    curr_match == MTRR_TYPE_UNCACHABLE) {
-			return MTRR_TYPE_UNCACHABLE;
-		}
-
-		if ((prev_match == MTRR_TYPE_WRBACK &&
-		     curr_match == MTRR_TYPE_WRTHROUGH) ||
-		    (prev_match == MTRR_TYPE_WRTHROUGH &&
-		     curr_match == MTRR_TYPE_WRBACK)) {
-			prev_match = MTRR_TYPE_WRTHROUGH;
-			curr_match = MTRR_TYPE_WRTHROUGH;
-		}
-
-		if (prev_match != curr_match)
-			return MTRR_TYPE_UNCACHABLE;
+		if (check_type_overlap(&prev_match, &curr_match))
+			return curr_match;
 	}
 
 	if (mtrr_tom2) {
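The helper encodes the effective-type rules the removed open-coded block implemented: UC dominates everything, WB and WT combine to WT, and any other disagreement degrades to UC. A userspace restatement of the helper, runnable as a quick sanity check (type values use the standard x86 MTRR encodings):

#include <stdint.h>
#include <stdio.h>

/* Standard x86 MTRR memory-type encodings (SDM / asm/mtrr.h). */
enum {
        MTRR_TYPE_UNCACHABLE = 0,
        MTRR_TYPE_WRCOMB     = 1,
        MTRR_TYPE_WRTHROUGH  = 4,
        MTRR_TYPE_WRPROT     = 5,
        MTRR_TYPE_WRBACK     = 6,
};

/* Userspace copy of check_type_overlap() from the hunk above. */
static int check_type_overlap(uint8_t *prev, uint8_t *curr)
{
        if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
                *prev = *curr = MTRR_TYPE_UNCACHABLE;
                return 1;                       /* effective type is UC */
        }

        if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
            (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
                *prev = *curr = MTRR_TYPE_WRTHROUGH;
        }

        if (*prev != *curr) {
                *prev = *curr = MTRR_TYPE_UNCACHABLE;
                return 1;                       /* incompatible: degrade to UC */
        }

        return 0;
}

int main(void)
{
        uint8_t a = MTRR_TYPE_WRBACK, b = MTRR_TYPE_WRTHROUGH;

        check_type_overlap(&a, &b);
        printf("WB+WT -> %d\n", (int)a);        /* 4: combines to WT */

        a = MTRR_TYPE_WRBACK;
        b = MTRR_TYPE_WRCOMB;
        printf("WB+WC uncacheable? %d\n", check_type_overlap(&a, &b)); /* 1 */

        return 0;
}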
@@ -166,6 +220,36 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 	return mtrr_state.def_type;
 }
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error return:
+ * 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	u8 type, prev_type;
+	int repeat;
+	u64 partial_end;
+
+	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+	/*
+	 * Common path is with repeat = 0.
+	 * However, we can have cases where [start:end] spans across some
+	 * MTRR range. Do repeated lookups for that case here.
+	 */
+	while (repeat) {
+		prev_type = type;
+		start = partial_end;
+		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+		if (check_type_overlap(&prev_type, &type))
+			return type;
+	}
+
+	return type;
+}
+
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
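With the wrapper in place, callers no longer see the old 0xFE "not entirely covered" result; a span crossing several MTRRs is resolved internally by the repeat loop. A hypothetical call site (kernel-context fragment, addresses illustrative):

u8 type = mtrr_type_lookup(0xb0000000ULL, 0xd0000000ULL);

if (type == 0xFF)               /* only remaining error return */
        pr_info("MTRRs not enabled\n");
else
        pr_info("effective MTRR type: %u\n", type);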
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..929739a653d1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -45,6 +45,7 @@
 #include <linux/cpu.h>
 #include <linux/pci.h>
 #include <linux/smp.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/processor.h>
 #include <asm/e820.h>
@@ -292,14 +293,24 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 
 	/*
 	 * HACK!
-	 * We use this same function to initialize the mtrrs on boot.
-	 * The state of the boot cpu's mtrrs has been saved, and we want
-	 * to replicate across all the APs.
-	 * If we're doing that @reg is set to something special...
+	 *
+	 * We use this same function to initialize the mtrrs during boot,
+	 * resume, runtime cpu online and on an explicit request to set a
+	 * specific MTRR.
+	 *
+	 * During boot or suspend, the state of the boot cpu's mtrrs has been
+	 * saved, and we want to replicate that across all the cpus that come
+	 * online (either at the end of boot or resume or during a runtime cpu
+	 * online). If we're doing that, @reg is set to something special and on
+	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
+	 * is unnecessary if at this point we are still on the cpu that started
+	 * the boot/resume sequence. But there is no guarantee that we are still
+	 * on the same cpu. So we do mtrr_if->set_all() on this cpu as well to be
+	 * sure that we are in sync with everyone else.
 	 */
 	if (reg != ~0U)
 		mtrr_if->set(reg, base, size, type);
-	else if (!mtrr_aps_delayed_init)
+	else
 		mtrr_if->set_all();
 
 	/* Wait for the others */
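For reference, the two call forms the comment describes, both taken from this file: a specific register, or the ~0U sentinel asking each CPU to replay the saved state:

/* Program one variable MTRR (rendezvous across all CPUs): */
set_mtrr(reg, base, size, type);

/* Replicate the boot CPU's saved MTRR state everywhere: */
set_mtrr(~0U, 0, 0, 0);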
@@ -630,7 +641,7 @@ struct mtrr_value {
 
 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
 
-static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
+static int mtrr_save(void)
 {
 	int i;
 
@@ -642,7 +653,7 @@ static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
 	return 0;
 }
 
-static int mtrr_restore(struct sys_device *sysdev)
+static void mtrr_restore(void)
 {
 	int i;
 
@@ -653,12 +664,11 @@ static int mtrr_restore(struct sys_device *sysdev)
 				 mtrr_value[i].ltype);
 		}
 	}
-	return 0;
 }
 
 
 
-static struct sysdev_driver mtrr_sysdev_driver = {
+static struct syscore_ops mtrr_syscore_ops = {
 	.suspend	= mtrr_save,
 	.resume		= mtrr_restore,
 };
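For readers unfamiliar with the target API: syscore_ops suspend handlers return int (0 on success) and run with a single CPU online and interrupts disabled, while resume handlers return void, which is why the mtrr_save()/mtrr_restore() signatures change above. A minimal sketch of the registration boilerplate (all names here are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_suspend(void)
{
        /* Runs late in suspend: one CPU, IRQs off. */
        return 0;
}

static void example_resume(void)
{
        /* Runs early in resume, before other CPUs come back. */
}

static struct syscore_ops example_syscore_ops = {
        .suspend        = example_suspend,
        .resume         = example_resume,
};

static int __init example_init(void)
{
        register_syscore_ops(&example_syscore_ops);
        return 0;
}
device_initcall(example_init);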
@@ -793,13 +803,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
 */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
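The flag itself is driven from the SMP bring-up path; a paraphrased sketch of the intended sequence, with function names as wired up in arch/x86/kernel/smpboot.c of that era:

/*
 * Paraphrased boot sequence (see arch/x86/kernel/smpboot.c):
 *
 *   native_smp_prepare_cpus()
 *       set_mtrr_aps_delayed_init();   -- defer per-AP MTRR programming
 *   ... APs boot, each skipping its individual MTRR setup ...
 *   native_smp_cpus_done()
 *       mtrr_aps_init();               -- one rendezvous initializes all APs
 */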
@@ -831,7 +849,7 @@ static int __init mtrr_init_finialize(void)
 	 * TBD: is there any system with such CPU which supports
 	 * suspend/resume? If no, we should remove the code.
 	 */
-	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
+	register_syscore_ops(&mtrr_syscore_ops);
 
 	return 0;
 }