-rw-r--r--	arch/x86/mm/pat.c          | 170
-rw-r--r--	arch/x86/mm/pat_internal.h |  28
2 files changed, 116 insertions(+), 82 deletions(-)
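In short: the patch moves PAT's private pieces (the dprintk() debug macro, struct memtype, and cattr_name()) out of pat.c into a new header, pat_internal.h, renames debug_enable to the now-global pat_debug_enable, and splits two helpers out of larger functions: memtype_check_insert() takes over the sorted-list walk and insertion formerly open-coded in reserve_memtype(), and copy_memtype_nth_element() takes over the locked lookup formerly open-coded in memtype_get_idx().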
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ae9648eb1c7f..628e507b7936 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,6 +30,8 @@
 #include <asm/pat.h>
 #include <asm/io.h>
 
+#include "pat_internal.h"
+
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
@@ -53,19 +55,15 @@ static inline void pat_disable(const char *reason)
 #endif
 
 
-static int debug_enable;
+int pat_debug_enable;
 
 static int __init pat_debug_setup(char *str)
 {
-	debug_enable = 1;
+	pat_debug_enable = 1;
 	return 0;
 }
 __setup("debugpat", pat_debug_setup);
 
-#define dprintk(fmt, arg...) \
-	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
-
-
 static u64 __read_mostly boot_pat_state;
 
 enum {
@@ -132,17 +130,6 @@ void pat_init(void)
 
 #undef PAT
 
-static char *cattr_name(unsigned long flags)
-{
-	switch (flags & _PAGE_CACHE_MASK) {
-	case _PAGE_CACHE_UC:		return "uncached";
-	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
-	case _PAGE_CACHE_WB:		return "write-back";
-	case _PAGE_CACHE_WC:		return "write-combining";
-	default:			return "broken";
-	}
-}
-
 /*
  * The global memtype list keeps track of memory type for specific
  * physical memory areas. Conflicting memory types in different
@@ -159,14 +146,6 @@ static char *cattr_name(unsigned long flags)
  * memtype_lock protects both the linear list and rbtree.
  */
 
-struct memtype {
-	u64			start;
-	u64			end;
-	unsigned long		type;
-	struct list_head	nd;
-	struct rb_node		rb;
-};
-
 static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
 static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
@@ -349,6 +328,64 @@ static int free_ram_pages_type(u64 start, u64 end)
 	return 0;
 }
 
+static int memtype_check_insert(struct memtype *new, unsigned long *new_type)
+{
+	struct memtype *entry;
+	u64 start, end;
+	unsigned long actual_type;
+	struct list_head *where;
+	int err = 0;
+
+	start = new->start;
+	end = new->end;
+	actual_type = new->type;
+
+	/* Search for existing mapping that overlaps the current range */
+	where = NULL;
+	list_for_each_entry(entry, &memtype_list, nd) {
+		if (end <= entry->start) {
+			where = entry->nd.prev;
+			break;
+		} else if (start <= entry->start) { /* end > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+				where = entry->nd.prev;
+			}
+			break;
+		} else if (start < entry->end) { /* start > entry->start */
+			err = chk_conflict(new, entry, new_type);
+			if (!err) {
+				dprintk("Overlap at 0x%Lx-0x%Lx\n",
+					entry->start, entry->end);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+						&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
+			}
+			break;
+		}
+	}
+	if (!err) {
+		if (where)
+			list_add(&new->nd, where);
+		else
+			list_add_tail(&new->nd, &memtype_list);
+
+		memtype_rb_insert(&memtype_rbroot, new);
+	}
+	return err;
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
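Since memtype_list is kept sorted by start address, the walk in memtype_check_insert() above only distinguishes three interesting positions of the new half-open range against each existing entry before it either records an insertion point or checks for a cache-attribute conflict. A standalone sketch of just that classification follows; the harness is hypothetical userspace code, not part of the patch, with names chosen to mirror it:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* half-open: [start, end) */

/* Mirror of the comparisons in memtype_check_insert(), in walk order. */
static const char *classify(struct range new, struct range entry)
{
	if (new.end <= entry.start)
		return "no overlap: insert before entry";
	else if (new.start <= entry.start)
		return "overlap at entry head: chk_conflict(), insert before entry";
	else if (new.start < entry.end)
		return "overlap at entry tail: chk_conflict(), scan on for the slot";
	else
		return "entirely past entry: keep walking";
}

int main(void)
{
	struct range entry = { 0x1000, 0x2000 };
	struct range tests[] = {
		{ 0x0000, 0x0800 },	/* ends before entry       */
		{ 0x0800, 0x1800 },	/* straddles entry's start */
		{ 0x1800, 0x2800 },	/* starts inside entry     */
		{ 0x3000, 0x4000 },	/* begins after entry      */
	};

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("[0x%llx-0x%llx): %s\n",
		       (unsigned long long)tests[i].start,
		       (unsigned long long)tests[i].end,
		       classify(tests[i], entry));
	return 0;
}

Built with any C compiler, this prints the decision for each of the four sample ranges against the entry [0x1000, 0x2000).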
@@ -364,9 +401,8 @@ static int free_ram_pages_type(u64 start, u64 end)
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		    unsigned long *new_type)
 {
-	struct memtype *new, *entry;
+	struct memtype *new;
 	unsigned long actual_type;
-	struct list_head *where;
 	int is_range_ram;
 	int err = 0;
 
@@ -423,42 +459,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	/* Search for existing mapping that overlaps the current range */
-	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
-		if (end <= entry->start) {
-			where = entry->nd.prev;
-			break;
-		} else if (start <= entry->start) { /* end > entry->start */
-			err = chk_conflict(new, entry, new_type);
-			if (!err) {
-				dprintk("Overlap at 0x%Lx-0x%Lx\n",
-					entry->start, entry->end);
-				where = entry->nd.prev;
-			}
-			break;
-		} else if (start < entry->end) { /* start > entry->start */
-			err = chk_conflict(new, entry, new_type);
-			if (!err) {
-				dprintk("Overlap at 0x%Lx-0x%Lx\n",
-					entry->start, entry->end);
-
-				/*
-				 * Move to right position in the linked
-				 * list to add this new entry
-				 */
-				list_for_each_entry_continue(entry,
-						&memtype_list, nd) {
-					if (start <= entry->start) {
-						where = entry->nd.prev;
-						break;
-					}
-				}
-			}
-			break;
-		}
-	}
-
+	err = memtype_check_insert(new, new_type);
 	if (err) {
 		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
 		       "track %s, req %s\n",
@@ -469,13 +470,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
-	if (where)
-		list_add(&new->nd, where);
-	else
-		list_add_tail(&new->nd, &memtype_list);
-
-	memtype_rb_insert(&memtype_rbroot, new);
-
 	spin_unlock(&memtype_lock);
 
 	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -937,28 +931,40 @@ EXPORT_SYMBOL_GPL(pgprot_writecombine);
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
-static struct memtype *memtype_get_idx(loff_t pos)
+static int copy_memtype_nth_element(struct memtype *out, loff_t pos)
 {
-	struct memtype *list_node, *print_entry;
+	struct memtype *list_node;
 	int i = 1;
 
-	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
-	if (!print_entry)
-		return NULL;
-
-	spin_lock(&memtype_lock);
 	list_for_each_entry(list_node, &memtype_list, nd) {
 		if (pos == i) {
-			*print_entry = *list_node;
-			spin_unlock(&memtype_lock);
-			return print_entry;
+			*out = *list_node;
+			return 0;
 		}
 		++i;
 	}
+	return 1;
+}
+
+static struct memtype *memtype_get_idx(loff_t pos)
+{
+	struct memtype *print_entry;
+	int ret;
+
+	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
+	if (!print_entry)
+		return NULL;
+
+	spin_lock(&memtype_lock);
+	ret = copy_memtype_nth_element(print_entry, pos);
 	spin_unlock(&memtype_lock);
-	kfree(print_entry);
 
-	return NULL;
+	if (!ret) {
+		return print_entry;
+	} else {
+		kfree(print_entry);
+		return NULL;
+	}
 }
 
 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
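The memtype_get_idx() rework above follows a common kernel pattern: do the GFP_KERNEL allocation (which may sleep) before taking the spinlock, hold the lock only for the short copy in copy_memtype_nth_element(), and free the buffer if the lookup failed. A minimal userspace analogue of that pattern, assuming a pthread mutex in place of memtype_lock and an invented fixed table in place of memtype_list:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct memtype { unsigned long long start, end; };

static pthread_mutex_t memtype_lock = PTHREAD_MUTEX_INITIALIZER;
static struct memtype table[] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };
static size_t table_len = sizeof(table) / sizeof(table[0]);

/* Copy the pos-th element while the lock is held; 0 on success. */
static int copy_memtype_nth_element(struct memtype *out, size_t pos)
{
	if (pos >= table_len)
		return 1;
	*out = table[pos];
	return 0;
}

static struct memtype *memtype_get_idx(size_t pos)
{
	struct memtype *print_entry;
	int ret;

	/* Allocate before taking the lock, as the patch does with kzalloc(). */
	print_entry = calloc(1, sizeof(*print_entry));
	if (!print_entry)
		return NULL;

	pthread_mutex_lock(&memtype_lock);
	ret = copy_memtype_nth_element(print_entry, pos);
	pthread_mutex_unlock(&memtype_lock);

	if (!ret)
		return print_entry;
	free(print_entry);
	return NULL;
}

int main(void)
{
	struct memtype *m = memtype_get_idx(1);
	if (m) {
		printf("entry 1: 0x%llx-0x%llx\n", m->start, m->end);
		free(m);
	}
	return 0;
}

The shape is the point, not the data structure: the lock covers only the copy, never the allocation or the free.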
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
new file mode 100644
index 000000000000..6c98780eb731
--- /dev/null
+++ b/arch/x86/mm/pat_internal.h
@@ -0,0 +1,28 @@
+#ifndef __PAT_INTERNAL_H_
+#define __PAT_INTERNAL_H_
+
+extern int pat_debug_enable;
+
+#define dprintk(fmt, arg...) \
+	do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+struct memtype {
+	u64			start;
+	u64			end;
+	unsigned long		type;
+	struct list_head	nd;
+	struct rb_node		rb;
+};
+
+static inline char *cattr_name(unsigned long flags)
+{
+	switch (flags & _PAGE_CACHE_MASK) {
+	case _PAGE_CACHE_UC:		return "uncached";
+	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
+	case _PAGE_CACHE_WB:		return "write-back";
+	case _PAGE_CACHE_WC:		return "write-combining";
+	default:			return "broken";
+	}
+}
+
+#endif /* __PAT_INTERNAL_H_ */
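For completeness, the dprintk() macro now shared via pat_internal.h is just a printk() gated on pat_debug_enable, which the debugpat boot option sets. A userspace analogue, with printf() standing in for printk(KERN_INFO ...) and the flag flipped directly instead of via the boot parameter:

#include <stdio.h>

static int pat_debug_enable;	/* set by the "debugpat" boot option in the patch */

#define dprintk(fmt, arg...) \
	do { if (pat_debug_enable) printf(fmt, ##arg); } while (0)

int main(void)
{
	dprintk("suppressed: debug flag is off\n");
	pat_debug_enable = 1;
	dprintk("Overlap at 0x%llx-0x%llx\n", 0x1000ULL, 0x2000ULL);
	return 0;
}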
