author:    Hugh Dickins <hugh@veritas.com>    2008-06-16 13:42:43 -0400
committer: Ingo Molnar <mingo@elte.hu>        2008-06-18 05:51:08 -0400
commit:    6cf514fce18589ea1e0521c5f2d7c2bb280fefc7
tree:      2f1fcc63d4e0e0d054050382e2d1136481fafc76 /arch/x86/mm
parent:    faeca31d068090285b77c39574d2bda14b079c50
x86: PAT: make pat_x_mtrr_type() more readable
Clean up over-complications in pat_x_mtrr_type().
And since reserve_memtype() already ignores stray req_type bits when
pat_enabled, it is better to mask them off in the !pat_enabled path too.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')

-rw-r--r--  arch/x86/mm/pat.c | 47 ++++++++++++-----------------------------------

1 file changed, 12 insertions(+), 35 deletions(-)
```diff
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7c21572bbdda..ac3a2b11eb38 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -159,47 +159,31 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
-				unsigned long *ret_prot)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 {
-	unsigned long pat_type;
 	u8 mtrr_type;
 
-	pat_type = prot & _PAGE_CACHE_MASK;
-	prot &= (~_PAGE_CACHE_MASK);
-
 	/*
 	 * We return the PAT request directly for types where PAT takes
 	 * precedence with respect to MTRR and for UC_MINUS.
 	 * Consistency checks with other PAT requests is done later
 	 * while going through memtype list.
 	 */
-	if (pat_type == _PAGE_CACHE_WC) {
-		*ret_prot = prot | _PAGE_CACHE_WC;
-		return 0;
-	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
-		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-		return 0;
-	} else if (pat_type == _PAGE_CACHE_UC) {
-		*ret_prot = prot | _PAGE_CACHE_UC;
-		return 0;
-	}
+	if (req_type == _PAGE_CACHE_WC ||
+	    req_type == _PAGE_CACHE_UC_MINUS ||
+	    req_type == _PAGE_CACHE_UC)
+		return req_type;
 
 	/*
 	 * Look for MTRR hint to get the effective type in case where PAT
 	 * request is for WB.
 	 */
 	mtrr_type = mtrr_type_lookup(start, end);
-
-	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
-		*ret_prot = prot | _PAGE_CACHE_UC;
-	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
-		*ret_prot = prot | _PAGE_CACHE_WC;
-	} else {
-		*ret_prot = prot | _PAGE_CACHE_WB;
-	}
-
-	return 0;
+	if (mtrr_type == MTRR_TYPE_UNCACHABLE)
+		return _PAGE_CACHE_UC;
+	if (mtrr_type == MTRR_TYPE_WRCOMB)
+		return _PAGE_CACHE_WC;
+	return _PAGE_CACHE_WB;
 }
 
 /*
```
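For readers who want to step through the simplified control flow outside the kernel, here is a minimal user-space sketch of the refactored function. The `_PAGE_CACHE_*` and `MTRR_TYPE_*` values below are illustrative stand-ins (the real definitions live in the arch/x86 headers), and `mtrr_type_lookup()` is a stub; only the shape of the logic mirrors the patch.

```c
#include <stdio.h>

typedef unsigned long long u64;
typedef unsigned char u8;

/* Stand-in values for illustration only; the kernel's real
 * definitions live in arch/x86 headers. */
#define _PAGE_CACHE_WB		0x00UL
#define _PAGE_CACHE_WC		0x08UL
#define _PAGE_CACHE_UC_MINUS	0x10UL
#define _PAGE_CACHE_UC		0x18UL

#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRCOMB	1
#define MTRR_TYPE_WRBACK	6

/* Stub standing in for the kernel's MTRR lookup; pretend the
 * whole range is covered by a write-back MTRR. */
static u8 mtrr_type_lookup(u64 start, u64 end)
{
	(void)start;
	(void)end;
	return MTRR_TYPE_WRBACK;
}

/* Same shape as the patched kernel function: the effective type
 * is the return value, with no out-parameter and no error code. */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     unsigned long req_type)
{
	u8 mtrr_type;

	/* PAT wins outright for WC, UC- and UC requests. */
	if (req_type == _PAGE_CACHE_WC ||
	    req_type == _PAGE_CACHE_UC_MINUS ||
	    req_type == _PAGE_CACHE_UC)
		return req_type;

	/* For WB requests, fall back to the MTRR hint. */
	mtrr_type = mtrr_type_lookup(start, end);
	if (mtrr_type == MTRR_TYPE_UNCACHABLE)
		return _PAGE_CACHE_UC;
	if (mtrr_type == MTRR_TYPE_WRCOMB)
		return _PAGE_CACHE_WC;
	return _PAGE_CACHE_WB;
}

int main(void)
{
	/* A WB request over a WB MTRR stays WB (0x00 here). */
	printf("effective type: %#lx\n",
	       (unsigned long)pat_x_mtrr_type(0, 4096, _PAGE_CACHE_WB));
	return 0;
}
```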
```diff
@@ -232,7 +216,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		if (req_type == -1) {
 			*ret_type = _PAGE_CACHE_WB;
 		} else {
-			*ret_type = req_type;
+			*ret_type = req_type & _PAGE_CACHE_MASK;
 		}
 	}
 	return 0;
```
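This hunk is the !pat_enabled side of the commit message: previously the raw req_type was copied into *ret_type verbatim, so any bits outside the caching field would leak through. A small sketch of why the mask matters, using the same stand-in bit values as the previous sketch (in the kernel, _PAGE_CACHE_MASK is _PAGE_PCD | _PAGE_PWT):

```c
#include <assert.h>

/* Stand-in bit values, as in the previous sketch. */
#define _PAGE_CACHE_MASK	0x18UL	/* _PAGE_PCD | _PAGE_PWT */
#define _PAGE_CACHE_WB		0x00UL
#define _PAGE_RW		0x02UL	/* a stray, non-caching bit */

int main(void)
{
	/* A request carrying an unrelated bit alongside WB. */
	unsigned long req_type = _PAGE_CACHE_WB | _PAGE_RW;

	/* Old behaviour: *ret_type = req_type kept the stray bit. */
	assert((req_type & ~_PAGE_CACHE_MASK) != 0);

	/* New behaviour: only the caching bits survive. */
	unsigned long ret_type = req_type & _PAGE_CACHE_MASK;
	assert(ret_type == _PAGE_CACHE_WB);

	return 0;
}
```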
```diff
@@ -264,14 +248,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		}
 	} else {
 		req_type &= _PAGE_CACHE_MASK;
-		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
-	}
-
-	if (err) {
-		if (ret_type)
-			*ret_type = actual_type;
-
-		return -EINVAL;
+		actual_type = pat_x_mtrr_type(start, end, req_type);
 	}
 
 	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
```
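Since the refactored pat_x_mtrr_type() can no longer fail, the caller's err plumbing and the -EINVAL branch became dead code and are dropped along with it. A hypothetical, non-kernel illustration of the general pattern this hunk applies (function names here are invented for the example):

```c
#include <stdio.h>

/* Old style: result comes back through an out-parameter and the
 * return value is a status code that can never be non-zero. */
static int effective_type_old(unsigned long req, unsigned long *ret)
{
	*ret = req;
	return 0;	/* always "success" */
}

/* New style: the result is the return value, so callers need no
 * status check at all. */
static unsigned long effective_type_new(unsigned long req)
{
	return req;
}

int main(void)
{
	unsigned long type;

	/* This error branch is dead, just like the removed
	 * if (err) block in reserve_memtype(). */
	if (effective_type_old(0x18, &type))
		return 1;

	printf("old: %#lx, new: %#lx\n", type, effective_type_new(0x18));
	return 0;
}
```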