author		Venkatesh Pallipadi <venki@google.com>	2010-09-10 18:55:50 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2010-09-10 19:11:20 -0400
commit		351e5a703ad994405bd900da330823d3b4a372e0 (patch)
tree		c1ed671db8277d5f04e5a9e695e5d71fb8349f73 /arch/x86/kernel/cpu
parent		a7f07cfbaa1dd5bf9e615948f280c92e7928e6f7 (diff)
x86, mtrr: Support mtrr lookup for range spanning across MTRR range
mtrr_type_lookup [start:end] looks up the resultant MTRR type for that range, based on the fixed and all variable MTRR ranges. It does check for multiple variable MTRR ranges overlapping [start:end] and returns the net type. However, if [start:end] spans across any variable MTRR range, mtrr_type_lookup returns an error value of 0xFE. This was based on the typical usage of mtrr_type_lookup in PAT mapping, where the region being mapped would not normally span MTRR ranges, and it also kept the code simple.

Mark recently reported a problem with this limitation: when there are two contiguous MTRRs of type "writeback" and a memory mapping covers a region starting in one MTRR range and ending in the other, the mapping falls back to "uncached" because of the limitation above.

The change below adds support for such lookups spanning multiple MTRR ranges. We now have a wrapper, mtrr_type_lookup, that dynamically splits such a region into smaller chunks that each fit within one MTRR range, does a __mtrr_type_lookup on each chunk, and combines the results.

Reported-by: Mark Langsdorf <mark.langsdorf@amd.com>
Signed-off-by: Venkatesh Pallipadi <venki@google.com>
LKML-Reference: <1284159350-19841-3-git-send-email-venki@google.com>
Reviewed-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
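To make the split arithmetic above concrete, here is a minimal standalone sketch (not kernel code): it mirrors the mask-to-size trick used by the new get_mtrr_size() and the partial_end split point that the wrapper repeats the lookup from. The helper name mtrr_size_from_mask, the 36-bit physical address width, and the example base/size/start/end values are all assumptions made for this illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Sketch of the mask-to-size computation in get_mtrr_size(): with the
 * bits above the physical address width forced to 1, negating the
 * page-granular mask yields the number of pages the MTRR covers.
 */
static uint64_t mtrr_size_from_mask(uint64_t mask, uint64_t size_or_mask)
{
	uint64_t size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;	/* set the bits above the address width */
	size = -mask;		/* two's complement -> pages in the range */
	return size << PAGE_SHIFT;
}

int main(void)
{
	/* Assumed 36-bit physical address width for the example. */
	uint64_t size_or_mask = ~((1ULL << (36 - PAGE_SHIFT)) - 1);

	/* Hypothetical variable MTRR: base 2 GB, size 256 MB. */
	uint64_t base = 0x80000000ULL;
	uint64_t mask = ~(0x10000000ULL - 1) & ((1ULL << 36) - 1);
	uint64_t size = mtrr_size_from_mask(mask, size_or_mask);

	printf("MTRR covers [0x%llx:0x%llx)\n",
	       (unsigned long long)base, (unsigned long long)(base + size));

	/*
	 * A lookup for [start:end] that begins inside this MTRR but ends
	 * past it is split at partial_end = base + size; the wrapper then
	 * repeats the lookup for [partial_end:end] and combines the types.
	 */
	uint64_t start = 0x8f000000ULL, end = 0x98000000ULL;
	uint64_t partial_end = base + size;

	if (start < partial_end && partial_end < end)
		printf("split [0x%llx:0x%llx) into [0x%llx:0x%llx) + [0x%llx:0x%llx)\n",
		       (unsigned long long)start, (unsigned long long)end,
		       (unsigned long long)start, (unsigned long long)partial_end,
		       (unsigned long long)partial_end, (unsigned long long)end);
	return 0;
}

Built with a C99 compiler, this prints a 256 MB range and a split point at 0x90000000, which is the [start:*partial_end] / [*partial_end:end] repeat described in the new __mtrr_type_lookup() comment.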
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/mtrr/generic.c	| 84
1 file changed, 77 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 14f4f0c0329a..9f27228ceffd 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -64,6 +64,18 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	}
 }
 
+/* Get the size of contiguous MTRR range */
+static u64 get_mtrr_size(u64 mask)
+{
+	u64 size;
+
+	mask >>= PAGE_SHIFT;
+	mask |= size_or_mask;
+	size = -mask;
+	size <<= PAGE_SHIFT;
+	return size;
+}
+
 /*
  * Check and return the effective type for MTRR-MTRR type overlap.
  * Returns 1 if the effective type is UNCACHEABLE, else returns 0
@@ -92,17 +104,19 @@ static int check_type_overlap(u8 *prev, u8 *curr)
 }
 
 /*
- * Returns the effective MTRR type for the region
- * Error returns:
- * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
- * - 0xFF - when MTRR is not enabled
+ * Error/Semi-error returns:
+ * 0xFF - when MTRR is not enabled
+ * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
+ * corresponds only to [start:*partial_end].
+ * Caller has to lookup again for [*partial_end:end].
  */
-u8 mtrr_type_lookup(u64 start, u64 end)
+static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 {
 	int i;
 	u64 base, mask;
 	u8 prev_match, curr_match;
 
+	*repeat = 0;
 	if (!mtrr_state_set)
 		return 0xFF;
 
@@ -153,8 +167,34 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 
 		start_state = ((start & mask) == (base & mask));
 		end_state = ((end & mask) == (base & mask));
-		if (start_state != end_state)
-			return 0xFE;
+
+		if (start_state != end_state) {
+			/*
+			 * We have start:end spanning across an MTRR.
+			 * We split the region into
+			 * either
+			 * (start:mtrr_end) (mtrr_end:end)
+			 * or
+			 * (start:mtrr_start) (mtrr_start:end)
+			 * depending on kind of overlap.
+			 * Return the type for first region and a pointer to
+			 * the start of second region so that caller will
+			 * lookup again on the second region.
+			 * Note: This way we handle multiple overlaps as well.
+			 */
+			if (start_state)
+				*partial_end = base + get_mtrr_size(mask);
+			else
+				*partial_end = base;
+
+			if (unlikely(*partial_end <= start)) {
+				WARN_ON(1);
+				*partial_end = start + PAGE_SIZE;
+			}
+
+			end = *partial_end - 1; /* end is inclusive */
+			*repeat = 1;
+		}
 
 		if ((start & mask) != (base & mask))
 			continue;
@@ -180,6 +220,36 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 	return mtrr_state.def_type;
 }
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error return:
+ * 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	u8 type, prev_type;
+	int repeat;
+	u64 partial_end;
+
+	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+	/*
+	 * Common path is with repeat = 0.
+	 * However, we can have cases where [start:end] spans across some
+	 * MTRR range. Do repeated lookups for that case here.
+	 */
+	while (repeat) {
+		prev_type = type;
+		start = partial_end;
+		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+		if (check_type_overlap(&prev_type, &type))
+			return type;
+	}
+
+	return type;
+}
+
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)