Diffstat (limited to 'arch/x86/mm/pat.c')
 arch/x86/mm/pat.c | 470 ++++++++++++++++++++++++----------------------------
 1 file changed, 230 insertions(+), 240 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 06b7a1c90fb8..2fe30916d4b6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -12,6 +12,8 @@
 #include <linux/gfp.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #include <asm/msr.h>
 #include <asm/tlbflush.h>
@@ -26,11 +28,11 @@
 #include <asm/io.h>
 
 #ifdef CONFIG_X86_PAT
-int __read_mostly pat_wc_enabled = 1;
+int __read_mostly pat_enabled = 1;
 
 void __cpuinit pat_disable(char *reason)
 {
-	pat_wc_enabled = 0;
+	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
 }
 
@@ -42,6 +44,19 @@ static int __init nopat(char *str)
 early_param("nopat", nopat);
 #endif
 
+
+static int debug_enable;
+static int __init pat_debug_setup(char *str)
+{
+	debug_enable = 1;
+	return 0;
+}
+__setup("debugpat", pat_debug_setup);
+
+#define dprintk(fmt, arg...) \
+	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+
 static u64 __read_mostly boot_pat_state;
 
 enum {
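A note on the machinery added above: __setup("debugpat", pat_debug_setup) registers a kernel command-line hook, so booting with "debugpat" sets debug_enable and every dprintk() in this file starts logging at KERN_INFO. A minimal sketch of a caller (this function is illustrative, not part of the patch):

	/* Illustrative only: cheap tracing gated on the "debugpat" boot
	 * parameter; without it the macro compiles to a skipped branch. */
	static void example_trace(u64 start, u64 end)
	{
		dprintk("example range 0x%Lx-0x%Lx\n", start, end);
	}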
@@ -53,24 +68,25 @@ enum {
 	PAT_UC_MINUS = 7,	/* UC, but can be overriden by MTRR */
 };
 
-#define PAT(x,y) ((u64)PAT_ ## y << ((x)*8))
+#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
 
 void pat_init(void)
 {
 	u64 pat;
 
-	if (!pat_wc_enabled)
+	if (!pat_enabled)
 		return;
 
 	/* Paranoia check. */
-	if (!cpu_has_pat) {
-		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+	if (!cpu_has_pat && boot_pat_state) {
 		/*
-		 * Panic if this happens on the secondary CPU, and we
+		 * If this happens we are on a secondary CPU, but
 		 * switched to PAT on the boot CPU. We have no way to
 		 * undo PAT.
 		 */
-		BUG_ON(boot_pat_state);
+		printk(KERN_ERR "PAT enabled, "
+		       "but not supported by secondary CPU\n");
+		BUG();
 	}
 
 	/* Set PWT to Write-Combining. All other bits stay the same */
@@ -86,8 +102,8 @@ void pat_init(void)
 	 * 011 UC _PAGE_CACHE_UC
 	 * PAT bit unused
 	 */
-	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
-	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
+	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
+	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
 
 	/* Boot CPU check */
 	if (!boot_pat_state)
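For the arithmetic behind the table above: PAT(x, y) shifts the 3-bit encoding for type y into byte x of the IA32_PAT MSR. Using the upstream enum values (PAT_UC = 0, PAT_WC = 1, PAT_WB = 6; only PAT_UC_MINUS = 7 is visible in this excerpt), the assignment works out as in this hedged, stand-alone user-space re-check:

	#include <stdint.h>
	#include <assert.h>

	/* Values as in the upstream enum; not all appear in this hunk. */
	enum { PAT_UC = 0, PAT_WC = 1, PAT_WB = 6, PAT_UC_MINUS = 7 };
	#define PAT(x, y) ((uint64_t)PAT_ ## y << ((x)*8))

	int main(void)
	{
		uint64_t pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) |
			       PAT(3, UC) | PAT(4, WB) | PAT(5, WC) |
			       PAT(6, UC_MINUS) | PAT(7, UC);
		/* one 3-bit code per byte: 0x00 07 01 06, repeated */
		assert(pat == 0x0007010600070106ULL);
		return 0;
	}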
@@ -103,11 +119,11 @@ void pat_init(void)
 static char *cattr_name(unsigned long flags)
 {
 	switch (flags & _PAGE_CACHE_MASK) {
-	case _PAGE_CACHE_UC: return "uncached";
-	case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
-	case _PAGE_CACHE_WB: return "write-back";
-	case _PAGE_CACHE_WC: return "write-combining";
-	default: return "broken";
+	case _PAGE_CACHE_UC:		return "uncached";
+	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
+	case _PAGE_CACHE_WB:		return "write-back";
+	case _PAGE_CACHE_WC:		return "write-combining";
+	default:			return "broken";
 	}
 }
 
@@ -145,47 +161,50 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
-				unsigned long *ret_prot)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 {
-	unsigned long pat_type;
-	u8 mtrr_type;
-
-	pat_type = prot & _PAGE_CACHE_MASK;
-	prot &= (~_PAGE_CACHE_MASK);
-
-	/*
-	 * We return the PAT request directly for types where PAT takes
-	 * precedence with respect to MTRR and for UC_MINUS.
-	 * Consistency checks with other PAT requests is done later
-	 * while going through memtype list.
-	 */
-	if (pat_type == _PAGE_CACHE_WC) {
-		*ret_prot = prot | _PAGE_CACHE_WC;
-		return 0;
-	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
-		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-		return 0;
-	} else if (pat_type == _PAGE_CACHE_UC) {
-		*ret_prot = prot | _PAGE_CACHE_UC;
-		return 0;
-	}
-
 	/*
 	 * Look for MTRR hint to get the effective type in case where PAT
 	 * request is for WB.
 	 */
-	mtrr_type = mtrr_type_lookup(start, end);
+	if (req_type == _PAGE_CACHE_WB) {
+		u8 mtrr_type;
+
+		mtrr_type = mtrr_type_lookup(start, end);
+		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
+			return _PAGE_CACHE_UC;
+		if (mtrr_type == MTRR_TYPE_WRCOMB)
+			return _PAGE_CACHE_WC;
+	}
 
-	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
-		*ret_prot = prot | _PAGE_CACHE_UC;
-	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
-		*ret_prot = prot | _PAGE_CACHE_WC;
-	} else {
-		*ret_prot = prot | _PAGE_CACHE_WB;
-	}
+	return req_type;
+}
 
+static int chk_conflict(struct memtype *new, struct memtype *entry,
+			unsigned long *type)
+{
+	if (new->type != entry->type) {
+		if (type) {
+			new->type = entry->type;
+			*type = entry->type;
+		} else
+			goto conflict;
+	}
+
+	/* check overlaps with more than one entry in the list */
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
+		if (new->end <= entry->start)
+			break;
+		else if (new->type != entry->type)
+			goto conflict;
+	}
 	return 0;
+
+ conflict:
+	printk(KERN_INFO "%s:%d conflicting memory types "
+	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
+	       new->end, cattr_name(new->type), cattr_name(entry->type));
+	return -EBUSY;
 }
 
 /*
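Net behavior of the reworked helper: WC, UC_MINUS and UC requests now pass straight through (PAT wins over MTRR for those), and only a WB request consults the MTRRs; cross-request consistency is delegated to chk_conflict() during the list walk. A hypothetical call, with an invented address range:

	u64 start = 0xd0000000ULL, end = 0xd0100000ULL;	/* made-up range */
	unsigned long eff = pat_x_mtrr_type(start, end, _PAGE_CACHE_WB);
	/* eff is _PAGE_CACHE_WC if an MTRR maps the range write-combining,
	 * _PAGE_CACHE_UC if uncachable, otherwise the WB request stands. */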
@@ -198,37 +217,36 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
  * req_type will have a special case value '-1', when requester want to inherit
  * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
  *
- * If ret_type is NULL, function will return an error if it cannot reserve the
- * region with req_type. If ret_type is non-null, function will return
- * available type in ret_type in case of no error. In case of any error
+ * If new_type is NULL, function will return an error if it cannot reserve the
+ * region with req_type. If new_type is non-NULL, function will return
+ * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-			unsigned long *ret_type)
+		    unsigned long *new_type)
 {
-	struct memtype *new_entry = NULL;
-	struct memtype *parse;
+	struct memtype *new, *entry;
 	unsigned long actual_type;
+	struct list_head *where;
 	int err = 0;
 
-	/* Only track when pat_wc_enabled */
-	if (!pat_wc_enabled) {
+	BUG_ON(start >= end); /* end is exclusive */
+
+	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
-		if (ret_type) {
-			if (req_type == -1) {
-				*ret_type = _PAGE_CACHE_WB;
-			} else {
-				*ret_type = req_type;
-			}
+		if (new_type) {
+			if (req_type == -1)
+				*new_type = _PAGE_CACHE_WB;
+			else
+				*new_type = req_type & _PAGE_CACHE_MASK;
 		}
 		return 0;
 	}
 
 	/* Low ISA region is always mapped WB in page table. No need to track */
-	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
-		if (ret_type)
-			*ret_type = _PAGE_CACHE_WB;
-
+	if (is_ISA_range(start, end - 1)) {
+		if (new_type)
+			*new_type = _PAGE_CACHE_WB;
 		return 0;
 	}
 
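On the '-1' special case documented above: passing req_type == -1 asks reserve_memtype() to inherit a type (WB when the MTRRs say WB, UC_MINUS otherwise) instead of insisting on one. Two hedged call styles, assuming some start/end pair is in scope:

	unsigned long got;
	int err;

	/* strict: demand WC, fail with -EBUSY on any conflicting overlap */
	err = reserve_memtype(start, end, _PAGE_CACHE_WC, NULL);

	/* inherit: take what MTRR/existing entries allow, reported in got */
	err = reserve_memtype(start, end, -1, &got);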
@@ -241,206 +259,92 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	 */
 	u8 mtrr_type = mtrr_type_lookup(start, end);
 
-	if (mtrr_type == MTRR_TYPE_WRBACK) {
-		req_type = _PAGE_CACHE_WB;
+	if (mtrr_type == MTRR_TYPE_WRBACK)
 		actual_type = _PAGE_CACHE_WB;
-	} else {
-		req_type = _PAGE_CACHE_UC_MINUS;
+	else
 		actual_type = _PAGE_CACHE_UC_MINUS;
-		}
-	} else {
-		req_type &= _PAGE_CACHE_MASK;
-		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
-	}
-
-	if (err) {
-		if (ret_type)
-			*ret_type = actual_type;
+	} else
+		actual_type = pat_x_mtrr_type(start, end,
+					      req_type & _PAGE_CACHE_MASK);
 
-		return -EINVAL;
-	}
-
-	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
-	if (!new_entry)
+	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+	if (!new)
 		return -ENOMEM;
 
-	new_entry->start = start;
-	new_entry->end = end;
-	new_entry->type = actual_type;
+	new->start = start;
+	new->end = end;
+	new->type = actual_type;
 
-	if (ret_type)
-		*ret_type = actual_type;
+	if (new_type)
+		*new_type = actual_type;
 
 	spin_lock(&memtype_lock);
 
 	/* Search for existing mapping that overlaps the current range */
-	list_for_each_entry(parse, &memtype_list, nd) {
-		struct memtype *saved_ptr;
-
-		if (parse->start >= end) {
-			pr_debug("New Entry\n");
-			list_add(&new_entry->nd, parse->nd.prev);
-			new_entry = NULL;
+	where = NULL;
+	list_for_each_entry(entry, &memtype_list, nd) {
+		if (end <= entry->start) {
+			where = entry->nd.prev;
 			break;
-		}
-
-		if (start <= parse->start && end >= parse->start) {
-			if (actual_type != parse->type && ret_type) {
-				actual_type = parse->type;
-				*ret_type = actual_type;
-				new_entry->type = actual_type;
+		} else if (start <= entry->start) { /* end > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+				where = entry->nd.prev;
 			}
-
-			if (actual_type != parse->type) {
-				printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-					current->comm, current->pid,
-					start, end,
-					cattr_name(actual_type),
-					cattr_name(parse->type));
-				err = -EBUSY;
-				break;
-			}
-
-			saved_ptr = parse;
-			/*
-			 * Check to see whether the request overlaps more
-			 * than one entry in the list
-			 */
-			list_for_each_entry_continue(parse, &memtype_list, nd) {
-				if (end <= parse->start) {
-					break;
-				}
-
-				if (actual_type != parse->type) {
-					printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-						current->comm, current->pid,
-						start, end,
-						cattr_name(actual_type),
-						cattr_name(parse->type));
-					err = -EBUSY;
-					break;
-				}
-			}
-
-			if (err) {
-				break;
-			}
-
-			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
-			       saved_ptr->start, saved_ptr->end);
-			/* No conflict. Go ahead and add this new entry */
-			list_add(&new_entry->nd, saved_ptr->nd.prev);
-			new_entry = NULL;
 			break;
-		}
-
-		if (start < parse->end) {
-			if (actual_type != parse->type && ret_type) {
-				actual_type = parse->type;
-				*ret_type = actual_type;
-				new_entry->type = actual_type;
-			}
-
-			if (actual_type != parse->type) {
-				printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-					current->comm, current->pid,
-					start, end,
-					cattr_name(actual_type),
-					cattr_name(parse->type));
-				err = -EBUSY;
-				break;
-			}
-
-			saved_ptr = parse;
-			/*
-			 * Check to see whether the request overlaps more
-			 * than one entry in the list
-			 */
-			list_for_each_entry_continue(parse, &memtype_list, nd) {
-				if (end <= parse->start) {
-					break;
-				}
-
-				if (actual_type != parse->type) {
-					printk(
-		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-						current->comm, current->pid,
-						start, end,
-						cattr_name(actual_type),
-						cattr_name(parse->type));
-					err = -EBUSY;
-					break;
-				}
-			}
+		} else if (start < entry->end) { /* start > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+				where = &entry->nd;
 			}
-
-			if (err) {
-				break;
-			}
-
-			pr_debug(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
-			       saved_ptr->start, saved_ptr->end);
-			/* No conflict. Go ahead and add this new entry */
-			list_add(&new_entry->nd, &saved_ptr->nd);
-			new_entry = NULL;
 			break;
 		}
 	}
 
 	if (err) {
-		printk(KERN_INFO
-	"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
-			start, end, cattr_name(new_entry->type),
-			cattr_name(req_type));
-		kfree(new_entry);
+		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
+		       "track %s, req %s\n",
+		       start, end, cattr_name(new->type), cattr_name(req_type));
+		kfree(new);
 		spin_unlock(&memtype_lock);
 		return err;
 	}
 
-	if (new_entry) {
-		/* No conflict. Not yet added to the list. Add to the tail */
-		list_add_tail(&new_entry->nd, &memtype_list);
-		pr_debug("New Entry\n");
-	}
-
-	if (ret_type) {
-		pr_debug(
-	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
-			start, end, cattr_name(actual_type),
-			cattr_name(req_type), cattr_name(*ret_type));
-	} else {
-		pr_debug(
-	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
-			start, end, cattr_name(actual_type),
-			cattr_name(req_type));
-	}
+	if (where)
+		list_add(&new->nd, where);
+	else
+		list_add_tail(&new->nd, &memtype_list);
 
 	spin_unlock(&memtype_lock);
+
+	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
+		start, end, cattr_name(new->type), cattr_name(req_type),
+		new_type ? cattr_name(*new_type) : "-");
+
 	return err;
 }
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *ml;
+	struct memtype *entry;
 	int err = -EINVAL;
 
-	/* Only track when pat_wc_enabled */
-	if (!pat_wc_enabled) {
+	if (!pat_enabled)
 		return 0;
-	}
 
 	/* Low ISA region is always mapped WB. No need to track */
-	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
+	if (is_ISA_range(start, end - 1))
 		return 0;
-	}
 
 	spin_lock(&memtype_lock);
-	list_for_each_entry(ml, &memtype_list, nd) {
-		if (ml->start == start && ml->end == end) {
-			list_del(&ml->nd);
-			kfree(ml);
+	list_for_each_entry(entry, &memtype_list, nd) {
+		if (entry->start == start && entry->end == end) {
+			list_del(&entry->nd);
+			kfree(entry);
 			err = 0;
 			break;
 		}
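The rewrite above replaces two near-duplicate overlap blocks with chk_conflict() plus a remembered insertion point (where), keeping memtype_list sorted by start address. The caller contract is unchanged; a sketch of the usual pairing (hypothetical function, error handling trimmed):

	/* Hypothetical caller, not from this patch. */
	static int example_map(u64 base, u64 size)
	{
		unsigned long got;
		int ret = reserve_memtype(base, base + size,
					  _PAGE_CACHE_UC_MINUS, &got);
		if (ret)
			return ret;	/* conflicting overlap: -EBUSY */
		/* ... map using 'got', which chk_conflict() may have
		 * downgraded to match an existing reservation ... */
		free_memtype(base, base + size);	/* same exact range */
		return 0;
	}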
@@ -452,7 +356,7 @@ int free_memtype(u64 start, u64 end)
 			current->comm, current->pid, start, end);
 	}
 
-	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
 	return err;
 }
 
@@ -471,8 +375,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 	return vma_prot;
 }
 
-#ifdef CONFIG_NONPROMISC_DEVMEM
-/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM*/
+#ifdef CONFIG_STRICT_DEVMEM
+/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -496,7 +400,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	}
 	return 1;
 }
-#endif /* CONFIG_NONPROMISC_DEVMEM */
+#endif /* CONFIG_STRICT_DEVMEM */
 
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
@@ -521,12 +425,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
-	if (!pat_wc_enabled &&
-	    ! ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-		test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-		test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-		test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
+	if (!pat_enabled &&
+	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
+	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
+	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
+	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
 		flags = _PAGE_CACHE_UC;
 	}
 #endif
@@ -547,8 +451,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (retval < 0)
 		return 0;
 
-	if (pfn <= max_pfn_mapped &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+	if (((pfn < max_low_pfn_mapped) ||
+	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
+	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
 		free_memtype(offset, offset + size);
 		printk(KERN_INFO
 		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -587,3 +492,88 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 	free_memtype(addr, addr + size);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+
+/* get Nth element of the linked list */
+static struct memtype *memtype_get_idx(loff_t pos)
+{
+	struct memtype *list_node, *print_entry;
+	int i = 1;
+
+	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+	if (!print_entry)
+		return NULL;
+
+	spin_lock(&memtype_lock);
+	list_for_each_entry(list_node, &memtype_list, nd) {
+		if (pos == i) {
+			*print_entry = *list_node;
+			spin_unlock(&memtype_lock);
+			return print_entry;
+		}
+		++i;
+	}
+	spin_unlock(&memtype_lock);
+	kfree(print_entry);
+	return NULL;
+}
+
+static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	if (*pos == 0) {
+		++*pos;
+		seq_printf(seq, "PAT memtype list:\n");
+	}
+
+	return memtype_get_idx(*pos);
+}
+
+static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return memtype_get_idx(*pos);
+}
+
+static void memtype_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int memtype_seq_show(struct seq_file *seq, void *v)
+{
+	struct memtype *print_entry = (struct memtype *)v;
+
+	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
+			print_entry->start, print_entry->end);
+	kfree(print_entry);
+	return 0;
+}
+
+static struct seq_operations memtype_seq_ops = {
+	.start = memtype_seq_start,
+	.next  = memtype_seq_next,
+	.stop  = memtype_seq_stop,
+	.show  = memtype_seq_show,
+};
+
+static int memtype_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &memtype_seq_ops);
+}
+
+static const struct file_operations memtype_fops = {
+	.open    = memtype_seq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+static int __init pat_memtype_list_init(void)
+{
+	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
+				NULL, &memtype_fops);
+	return 0;
+}
+
+late_initcall(pat_memtype_list_init);
+
+#endif /* CONFIG_DEBUG_FS */
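With CONFIG_DEBUG_FS enabled, the reservations become visible once late_initcall() has run. Assuming debugfs is mounted at the usual /sys/kernel/debug, and since arch_debugfs_dir is the "x86" directory there, a session might look like the following, with invented entries in the seq_printf format above:

	# cat /sys/kernel/debug/x86/pat_memtype_list
	PAT memtype list:
	uncached-minus @ 0xd0000000-0xd0100000
	write-combining @ 0xe0000000-0xe1000000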