Diffstat (limited to 'arch/x86/mm/pat.c')

 -rw-r--r--  arch/x86/mm/pat.c | 392
 1 file changed, 146 insertions, 246 deletions

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index de3a99812450..a885a1019b8a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -26,15 +26,15 @@
 #include <asm/io.h>
 
 #ifdef CONFIG_X86_PAT
-int __read_mostly pat_wc_enabled = 1;
+int __read_mostly pat_enabled = 1;
 
 void __cpuinit pat_disable(char *reason)
 {
-	pat_wc_enabled = 0;
+	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
 }
 
-static int nopat(char *str)
+static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled.");
 	return 0;
@@ -42,6 +42,19 @@ static int nopat(char *str)
 early_param("nopat", nopat);
 #endif
 
+
+static int debug_enable;
+static int __init pat_debug_setup(char *str)
+{
+	debug_enable = 1;
+	return 0;
+}
+__setup("debugpat", pat_debug_setup);
+
+#define dprintk(fmt, arg...) \
+	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+
 static u64 __read_mostly boot_pat_state;
 
 enum {
@@ -53,24 +66,25 @@ enum {
 	PAT_UC_MINUS = 7,	/* UC, but can be overriden by MTRR */
 };
 
-#define PAT(x,y) ((u64)PAT_ ## y << ((x)*8))
+#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
 
 void pat_init(void)
 {
 	u64 pat;
 
-	if (!pat_wc_enabled)
+	if (!pat_enabled)
 		return;
 
 	/* Paranoia check. */
-	if (!cpu_has_pat) {
-		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+	if (!cpu_has_pat && boot_pat_state) {
 		/*
-		 * Panic if this happens on the secondary CPU, and we
+		 * If this happens we are on a secondary CPU, but
 		 * switched to PAT on the boot CPU. We have no way to
 		 * undo PAT.
 		 */
-		BUG_ON(boot_pat_state);
+		printk(KERN_ERR "PAT enabled, "
+		       "but not supported by secondary CPU\n");
+		BUG();
 	}
 
 	/* Set PWT to Write-Combining. All other bits stay the same */
@@ -86,8 +100,8 @@ void pat_init(void)
 	 * 011 UC _PAGE_CACHE_UC
 	 * PAT bit unused
 	 */
-	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
-	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
+	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
+	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
 
 	/* Boot CPU check */
 	if (!boot_pat_state)
@@ -103,11 +117,11 @@ void pat_init(void)
 static char *cattr_name(unsigned long flags)
 {
 	switch (flags & _PAGE_CACHE_MASK) {
-		case _PAGE_CACHE_UC:		return "uncached";
-		case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
-		case _PAGE_CACHE_WB:		return "write-back";
-		case _PAGE_CACHE_WC:		return "write-combining";
-		default:			return "broken";
+	case _PAGE_CACHE_UC:		return "uncached";
+	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
+	case _PAGE_CACHE_WB:		return "write-back";
+	case _PAGE_CACHE_WC:		return "write-combining";
+	default:			return "broken";
 	}
 }
 
@@ -145,46 +159,50 @@ static DEFINE_SPINLOCK(memtype_lock); 	/* protects memtype list */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
-				unsigned long *ret_prot)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 {
-	unsigned long pat_type;
-	u8 mtrr_type;
-
-	mtrr_type = mtrr_type_lookup(start, end);
-	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
-		*ret_prot = prot;
-		return 0;
-	}
-	if (mtrr_type == 0xFE) {		/* MTRR match error */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
-	    mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
+	/*
+	 * Look for MTRR hint to get the effective type in case where PAT
+	 * request is for WB.
+	 */
+	if (req_type == _PAGE_CACHE_WB) {
+		u8 mtrr_type;
+
+		mtrr_type = mtrr_type_lookup(start, end);
+		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
+			return _PAGE_CACHE_UC;
+		if (mtrr_type == MTRR_TYPE_WRCOMB)
+			return _PAGE_CACHE_WC;
 	}
 
-	pat_type = prot & _PAGE_CACHE_MASK;
-	prot &= (~_PAGE_CACHE_MASK);
+	return req_type;
+}
 
-	/* Currently doing intersection by hand. Optimize it later. */
-	if (pat_type == _PAGE_CACHE_WC) {
-		*ret_prot = prot | _PAGE_CACHE_WC;
-	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
-		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-	} else if (pat_type == _PAGE_CACHE_UC ||
-		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
-		*ret_prot = prot | _PAGE_CACHE_UC;
-	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
-		*ret_prot = prot | _PAGE_CACHE_WC;
-	} else {
-		*ret_prot = prot | _PAGE_CACHE_WB;
+static int chk_conflict(struct memtype *new, struct memtype *entry,
+			unsigned long *type)
+{
+	if (new->type != entry->type) {
+		if (type) {
+			new->type = entry->type;
+			*type = entry->type;
+		} else
+			goto conflict;
 	}
 
+	/* check overlaps with more than one entry in the list */
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
+		if (new->end <= entry->start)
+			break;
+		else if (new->type != entry->type)
+			goto conflict;
+	}
 	return 0;
+
+ conflict:
+	printk(KERN_INFO "%s:%d conflicting memory types "
+		"%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
+		new->end, cattr_name(new->type), cattr_name(entry->type));
+	return -EBUSY;
 }
 
 /*
@@ -197,251 +215,134 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
  * req_type will have a special case value '-1', when requester want to inherit
  * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
  *
- * If ret_type is NULL, function will return an error if it cannot reserve the
- * region with req_type. If ret_type is non-null, function will return
- * available type in ret_type in case of no error. In case of any error
+ * If new_type is NULL, function will return an error if it cannot reserve the
+ * region with req_type. If new_type is non-NULL, function will return
+ * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-			unsigned long *ret_type)
+			unsigned long *new_type)
 {
-	struct memtype *new_entry = NULL;
-	struct memtype *parse;
+	struct memtype *new, *entry;
 	unsigned long actual_type;
+	struct list_head *where;
 	int err = 0;
 
-	/* Only track when pat_wc_enabled */
-	if (!pat_wc_enabled) {
+	BUG_ON(start >= end); /* end is exclusive */
+
+	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
-		if (ret_type) {
-			if (req_type == -1) {
-				*ret_type = _PAGE_CACHE_WB;
-			} else {
-				*ret_type = req_type;
-			}
+		if (new_type) {
+			if (req_type == -1)
+				*new_type = _PAGE_CACHE_WB;
+			else
+				*new_type = req_type & _PAGE_CACHE_MASK;
 		}
 		return 0;
 	}
 
 	/* Low ISA region is always mapped WB in page table. No need to track */
-	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
-		if (ret_type)
-			*ret_type = _PAGE_CACHE_WB;
-
+	if (is_ISA_range(start, end - 1)) {
+		if (new_type)
+			*new_type = _PAGE_CACHE_WB;
 		return 0;
 	}
 
 	if (req_type == -1) {
 		/*
-		 * Special case where caller wants to inherit from mtrr or
-		 * existing pat mapping, defaulting to UC_MINUS in case of
-		 * no match.
+		 * Call mtrr_lookup to get the type hint. This is an
+		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+		 * tools and ACPI tools). Use WB request for WB memory and use
+		 * UC_MINUS otherwise.
 		 */
 		u8 mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == 0xFE) {	/* MTRR match error */
-			err = -1;
-		}
 
-		if (mtrr_type == MTRR_TYPE_WRBACK) {
-			req_type = _PAGE_CACHE_WB;
+		if (mtrr_type == MTRR_TYPE_WRBACK)
 			actual_type = _PAGE_CACHE_WB;
-		} else {
-			req_type = _PAGE_CACHE_UC_MINUS;
+		else
 			actual_type = _PAGE_CACHE_UC_MINUS;
-		}
-	} else {
-		req_type &= _PAGE_CACHE_MASK;
-		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
-	}
+	} else
+		actual_type = pat_x_mtrr_type(start, end,
+					      req_type & _PAGE_CACHE_MASK);
 
-	if (err) {
-		if (ret_type)
-			*ret_type = actual_type;
-
-		return -EINVAL;
-	}
-
-	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
-	if (!new_entry)
+	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+	if (!new)
 		return -ENOMEM;
 
-	new_entry->start = start;
-	new_entry->end = end;
-	new_entry->type = actual_type;
+	new->start = start;
+	new->end = end;
+	new->type = actual_type;
 
-	if (ret_type)
-		*ret_type = actual_type;
+	if (new_type)
+		*new_type = actual_type;
 
 	spin_lock(&memtype_lock);
 
 	/* Search for existing mapping that overlaps the current range */
-	list_for_each_entry(parse, &memtype_list, nd) {
-		struct memtype *saved_ptr;
-
-		if (parse->start >= end) {
-			pr_debug("New Entry\n");
-			list_add(&new_entry->nd, parse->nd.prev);
-			new_entry = NULL;
+	where = NULL;
+	list_for_each_entry(entry, &memtype_list, nd) {
+		if (end <= entry->start) {
+			where = entry->nd.prev;
 			break;
-		}
-
-		if (start <= parse->start && end >= parse->start) {
-			if (actual_type != parse->type && ret_type) {
-				actual_type = parse->type;
-				*ret_type = actual_type;
-				new_entry->type = actual_type;
-			}
-
-			if (actual_type != parse->type) {
-				printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-					current->comm, current->pid,
-					start, end,
-					cattr_name(actual_type),
-					cattr_name(parse->type));
-				err = -EBUSY;
-				break;
-			}
-
-			saved_ptr = parse;
-			/*
-			 * Check to see whether the request overlaps more
-			 * than one entry in the list
-			 */
-			list_for_each_entry_continue(parse, &memtype_list, nd) {
-				if (end <= parse->start) {
-					break;
-				}
-
-				if (actual_type != parse->type) {
-					printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-						current->comm, current->pid,
-						start, end,
-						cattr_name(actual_type),
-						cattr_name(parse->type));
-					err = -EBUSY;
-					break;
-				}
+		} else if (start <= entry->start) { /* end > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+				where = entry->nd.prev;
 			}
-
-			if (err) {
-				break;
-			}
-
-			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
-					saved_ptr->start, saved_ptr->end);
-			/* No conflict. Go ahead and add this new entry */
-			list_add(&new_entry->nd, saved_ptr->nd.prev);
-			new_entry = NULL;
 			break;
-		}
-
-		if (start < parse->end) {
-			if (actual_type != parse->type && ret_type) {
-				actual_type = parse->type;
-				*ret_type = actual_type;
-				new_entry->type = actual_type;
-			}
-
-			if (actual_type != parse->type) {
-				printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-					current->comm, current->pid,
-					start, end,
-					cattr_name(actual_type),
-					cattr_name(parse->type));
-				err = -EBUSY;
-				break;
+		} else if (start < entry->end) { /* start > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+				where = &entry->nd;
 			}
-
-			saved_ptr = parse;
-			/*
-			 * Check to see whether the request overlaps more
-			 * than one entry in the list
-			 */
-			list_for_each_entry_continue(parse, &memtype_list, nd) {
-				if (end <= parse->start) {
-					break;
-				}
-
-				if (actual_type != parse->type) {
-					printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-						current->comm, current->pid,
-						start, end,
-						cattr_name(actual_type),
-						cattr_name(parse->type));
-					err = -EBUSY;
-					break;
-				}
-			}
-
-			if (err) {
-				break;
-			}
-
-			pr_debug(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
-					saved_ptr->start, saved_ptr->end);
-			/* No conflict. Go ahead and add this new entry */
-			list_add(&new_entry->nd, &saved_ptr->nd);
-			new_entry = NULL;
 			break;
 		}
 	}
 
 	if (err) {
-		printk(KERN_INFO
-	"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
-			start, end, cattr_name(new_entry->type),
-			cattr_name(req_type));
-		kfree(new_entry);
+		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
+			"track %s, req %s\n",
+			start, end, cattr_name(new->type), cattr_name(req_type));
+		kfree(new);
 		spin_unlock(&memtype_lock);
 		return err;
 	}
 
-	if (new_entry) {
-		/* No conflict. Not yet added to the list. Add to the tail */
-		list_add_tail(&new_entry->nd, &memtype_list);
-		pr_debug("New Entry\n");
-	}
-
-	if (ret_type) {
-		pr_debug(
-	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
-			start, end, cattr_name(actual_type),
-			cattr_name(req_type), cattr_name(*ret_type));
-	} else {
-		pr_debug(
-	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
-			start, end, cattr_name(actual_type),
-			cattr_name(req_type));
-	}
+	if (where)
+		list_add(&new->nd, where);
+	else
+		list_add_tail(&new->nd, &memtype_list);
 
 	spin_unlock(&memtype_lock);
+
+	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
+		start, end, cattr_name(new->type), cattr_name(req_type),
+		new_type ? cattr_name(*new_type) : "-");
+
 	return err;
 }
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *ml;
+	struct memtype *entry;
 	int err = -EINVAL;
 
-	/* Only track when pat_wc_enabled */
-	if (!pat_wc_enabled) {
+	if (!pat_enabled)
 		return 0;
-	}
 
 	/* Low ISA region is always mapped WB. No need to track */
-	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
+	if (is_ISA_range(start, end - 1))
 		return 0;
-	}
 
 	spin_lock(&memtype_lock);
-	list_for_each_entry(ml, &memtype_list, nd) {
-		if (ml->start == start && ml->end == end) {
-			list_del(&ml->nd);
-			kfree(ml);
+	list_for_each_entry(entry, &memtype_list, nd) {
+		if (entry->start == start && entry->end == end) {
+			list_del(&entry->nd);
+			kfree(entry);
 			err = 0;
 			break;
 		}
@@ -453,7 +354,7 @@ int free_memtype(u64 start, u64 end)
 			current->comm, current->pid, start, end);
 	}
 
-	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
 	return err;
 }
 
@@ -522,12 +423,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
-	if (!pat_wc_enabled &&
-	    ! ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-		test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-		test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-		test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
+	if (!pat_enabled &&
+	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
+	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
+	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
+	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
 		flags = _PAGE_CACHE_UC;
 	}
 #endif
@@ -549,7 +450,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 		return 0;
 
 	if (pfn <= max_pfn_mapped &&
-            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
 		free_memtype(offset, offset + size);
 		printk(KERN_INFO
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -587,4 +488,3 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 
 	free_memtype(addr, addr + size);
 }
-