 arch/x86/mm/init_32.c        |   1 -
 arch/x86/mm/init_64.c        |   2 --
 arch/x86/mm/pageattr.c       | 157 +++----------------------------------------
 include/asm-x86/cacheflush.h |   2 --
 4 files changed, 5 insertions(+), 157 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 44ccb028c350..74780800e7e7 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1051,7 +1051,6 @@ void __init mem_init(void)
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 
-	cpa_init();
 	save_pg_dir();
 	zap_low_mappings();
 }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9d7587ac1ebc..f54a4d97530f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -889,8 +889,6 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
-
-	cpa_init();
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 162812b05d28..f5e8663c0f75 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -447,114 +447,17 @@ out_unlock:
 	return do_split;
 }
 
-static LIST_HEAD(page_pool);
-static unsigned long pool_size, pool_pages, pool_low;
-static unsigned long pool_used, pool_failed;
-
-static void cpa_fill_pool(struct page **ret)
-{
-	gfp_t gfp = GFP_KERNEL;
-	unsigned long flags;
-	struct page *p;
-
-	/*
-	 * Avoid recursion (on debug-pagealloc) and also signal
-	 * our priority to get to these pagetables:
-	 */
-	if (current->flags & PF_MEMALLOC)
-		return;
-	current->flags |= PF_MEMALLOC;
-
-	/*
-	 * Allocate atomically from atomic contexts:
-	 */
-	if (in_atomic() || irqs_disabled() || debug_pagealloc)
-		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-
-	while (pool_pages < pool_size || (ret && !*ret)) {
-		p = alloc_pages(gfp, 0);
-		if (!p) {
-			pool_failed++;
-			break;
-		}
-		/*
-		 * If the call site needs a page right now, provide it:
-		 */
-		if (ret && !*ret) {
-			*ret = p;
-			continue;
-		}
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_add(&p->lru, &page_pool);
-		pool_pages++;
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
-
-	current->flags &= ~PF_MEMALLOC;
-}
-
-#define SHIFT_MB		(20 - PAGE_SHIFT)
-#define ROUND_MB_GB		((1 << 10) - 1)
-#define SHIFT_MB_GB		10
-#define POOL_PAGES_PER_GB	16
-
-void __init cpa_init(void)
-{
-	struct sysinfo si;
-	unsigned long gb;
-
-	si_meminfo(&si);
-	/*
-	 * Calculate the number of pool pages:
-	 *
-	 * Convert totalram (nr of pages) to MiB and round to the next
-	 * GiB. Shift MiB to Gib and multiply the result by
-	 * POOL_PAGES_PER_GB:
-	 */
-	if (debug_pagealloc) {
-		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
-		pool_size = POOL_PAGES_PER_GB * gb;
-	} else {
-		pool_size = 1;
-	}
-	pool_low = pool_size;
-
-	cpa_fill_pool(NULL);
-	printk(KERN_DEBUG
-	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
-	       pool_pages, pool_size);
-}
-
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	unsigned long flags, pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
-	struct page *base;
+	struct page *base = alloc_pages(GFP_KERNEL, 0);
+	if (!base)
+		return -ENOMEM;
 
-	/*
-	 * Get a page from the pool. The pool list is protected by the
-	 * pgd_lock, which we have to take anyway for the split
-	 * operation:
-	 */
 	spin_lock_irqsave(&pgd_lock, flags);
-	if (list_empty(&page_pool)) {
-		spin_unlock_irqrestore(&pgd_lock, flags);
-		base = NULL;
-		cpa_fill_pool(&base);
-		if (!base)
-			return -ENOMEM;
-		spin_lock_irqsave(&pgd_lock, flags);
-	} else {
-		base = list_first_entry(&page_pool, struct page, lru);
-		list_del(&base->lru);
-		pool_pages--;
-
-		if (pool_pages < pool_low)
-			pool_low = pool_pages;
-	}
-
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -611,11 +514,8 @@ out_unlock:
 	 * If we dropped out via the lookup_address check under
 	 * pgd_lock then stick the page back into the pool:
 	 */
-	if (base) {
-		list_add(&base->lru, &page_pool);
-		pool_pages++;
-	} else
-		pool_used++;
+	if (base)
+		__free_page(base);
 	spin_unlock_irqrestore(&pgd_lock, flags);
 
 	return 0;
@@ -899,8 +799,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	cpa_flush_all(cache);
 
 out:
-	cpa_fill_pool(NULL);
-
 	return ret;
 }
 
@@ -1178,53 +1076,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
-
-	/*
-	 * Try to refill the page pool here. We can do this only after
-	 * the tlb flush.
-	 */
-	cpa_fill_pool(NULL);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int dpa_show(struct seq_file *m, void *v)
-{
-	seq_puts(m, "DEBUG_PAGEALLOC\n");
-	seq_printf(m, "pool_size     : %lu\n", pool_size);
-	seq_printf(m, "pool_pages    : %lu\n", pool_pages);
-	seq_printf(m, "pool_low      : %lu\n", pool_low);
-	seq_printf(m, "pool_used     : %lu\n", pool_used);
-	seq_printf(m, "pool_failed   : %lu\n", pool_failed);
-
-	return 0;
-}
-
-static int dpa_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, dpa_show, NULL);
-}
 }
 
-static const struct file_operations dpa_fops = {
-	.open		= dpa_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init debug_pagealloc_proc_init(void)
-{
-	struct dentry *de;
-
-	de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
-				 &dpa_fops);
-	if (!de)
-		return -ENOMEM;
-
-	return 0;
-}
-__initcall(debug_pagealloc_proc_init);
-#endif
-
 #ifdef CONFIG_HIBERNATION
 
 bool kernel_page_present(struct page *page)
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 0a5f71817b3e..8e205a131250 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -99,8 +99,6 @@ int set_pages_rw(struct page *page, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
-void cpa_init(void);
-
#ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
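
The substance of the pageattr.c change above: split_large_page() now takes its new page-table page straight from the allocator before grabbing pgd_lock, and simply frees it again if the race check under the lock finds that another CPU already split the large page. Below is a minimal userspace sketch of that allocate-up-front, free-on-race pattern; fake_page, try_split, installed and already_split are illustrative stand-ins, not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct page: one PAGE_SIZE worth of backing store. */
struct fake_page { char data[4096]; };

static struct fake_page *installed;	/* "page table" kept by the winner */
static int already_split;		/* set once a racing path has split */

/* Sketch of the new split_large_page() flow: allocate, lock, re-check. */
static int try_split(void)
{
	/* alloc_pages(GFP_KERNEL, 0) analogue, done before any lock is held. */
	struct fake_page *base = malloc(sizeof(*base));
	if (!base)
		return -ENOMEM;	/* same early bail-out as the patch */

	/* spin_lock_irqsave(&pgd_lock, flags) would be taken here. */
	if (already_split) {
		/* Raced: the split happened elsewhere; drop our allocation. */
		free(base);	/* __free_page(base) analogue */
		return 0;
	}

	/* We won the race: the fresh page becomes the new page table. */
	installed = base;
	already_split = 1;
	/* spin_unlock_irqrestore(&pgd_lock, flags) would be dropped here. */
	return 0;
}

int main(void)
{
	int ret = try_split();
	printf("first split: %d, installed=%p\n", ret, (void *)installed);
	ret = try_split();
	printf("racing split: %d (duplicate allocation freed)\n", ret);
	free(installed);
	return 0;
}

The design point the diff encodes is visible in the sketch: the possibly-sleeping GFP_KERNEL allocation happens before pgd_lock is taken, never under it, and the re-check under the lock decides whether the page is installed or handed back, replacing the removed page_pool bookkeeping.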