author	Suresh Siddha <suresh.b.siddha@intel.com>	2008-09-23 17:00:39 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-10 13:29:14 -0400
commit	0b8fdcbcd287a1fbe66817491e6149841ae25705 (patch)
tree	ad140d2fc2603e095343ee7a135c95ad9621664c /arch
parent	a2699e477b8e6b17d4da64916f766dd5a2576c9c (diff)
x86, cpa: don't use large pages for kernel identity mapping with DEBUG_PAGEALLOC
Don't use large pages for the kernel identity mapping with DEBUG_PAGEALLOC. This removes the need to split a large page for an allocated kernel page from interrupt context, which simplifies the cpa code (the split no longer has to be done in interrupt context). The cpa code is simplified further in subsequent patches.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: arjan@linux.intel.com
Cc: venkatesh.pallipadi@intel.com
Cc: jeremy@goop.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
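The cost of this choice is early page-table memory: without PSE, every 4k page of the identity map needs its own PTE, which is why the patch threads use_pse into find_early_table_space(). The standalone userspace program below is a minimal sketch of that estimate, not the kernel function itself; the shift constants match x86, but the hard-coded 8-byte entry size and the omission of the PUD/GB-page terms are simplifications for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Rough model of the PMD/PTE part of find_early_table_space(). */
static unsigned long table_space(unsigned long end, int use_pse)
{
	unsigned long pmds, ptes, tables, extra;

	/* One 8-byte pmd entry per 2M of address space, page-aligned. */
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = PAGE_ALIGN(pmds * 8);

	if (use_pse) {
		/* Only the unaligned tail falls back to 4k PTEs. */
		extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else {
		/* Every 4k page gets its own PTE. */
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	tables += PAGE_ALIGN(ptes * 8);

	return tables;
}

int main(void)
{
	unsigned long end = 1UL << 30;	/* identity-map the first 1GB */

	printf("with PSE:    %6lu KB of page tables\n",
	       table_space(end, 1) >> 10);
	printf("without PSE: %6lu KB of page tables\n",
	       table_space(end, 0) >> 10);
	return 0;
}

For 1GB this comes to about 4 KB of tables with PSE versus roughly 2 MB without it, which is why a DEBUG_PAGEALLOC kernel must reserve the extra space up front in find_early_table_space() rather than discover the shortfall later.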
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/init_32.c	18
-rw-r--r--	arch/x86/mm/init_64.c	26
2 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9b5f7d7049d0..44ccb028c350 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -777,7 +777,7 @@ void __init setup_bootmem_allocator(void)
 	after_init_bootmem = 1;
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
@@ -787,7 +787,7 @@ static void __init find_early_table_space(unsigned long end)
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
 
-	if (cpu_has_pse) {
+	if (use_pse) {
 		unsigned long extra;
 
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -827,12 +827,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	pgd_t *pgd_base = swapper_pg_dir;
 	unsigned long start_pfn, end_pfn;
 	unsigned long big_page_start;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	int use_pse = 0;
+#else
+	int use_pse = cpu_has_pse;
+#endif
 
 	/*
 	 * Find space for the kernel direct mapping tables.
 	 */
 	if (!after_init_bootmem)
-		find_early_table_space(end);
+		find_early_table_space(end, use_pse);
 
 #ifdef CONFIG_X86_PAE
 	set_nx();
@@ -878,7 +888,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
 	if (start_pfn < end_pfn)
 		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-					     cpu_has_pse);
+					     use_pse);
 
 	/* tail is not big page alignment ? */
 	start_pfn = end_pfn;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1ba945eb6282..9d7587ac1ebc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -456,13 +456,14 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
-	if (direct_gbpages) {
+	if (use_gbpages) {
 		unsigned long extra;
 		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
 		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
@@ -470,7 +471,7 @@ static void __init find_early_table_space(unsigned long end)
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 
-	if (cpu_has_pse) {
+	if (use_pse) {
 		unsigned long extra;
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -640,6 +641,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
+	int use_pse, use_gbpages;
 
 	printk(KERN_INFO "init_memory_mapping\n");
 
@@ -653,9 +655,21 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (!after_bootmem)
 		init_gbpages();
 
-	if (direct_gbpages)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	use_pse = use_gbpages = 0;
+#else
+	use_pse = cpu_has_pse;
+	use_gbpages = direct_gbpages;
+#endif
+
+	if (use_gbpages)
 		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (cpu_has_pse)
+	if (use_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 
 	memset(mr, 0, sizeof(mr));
@@ -716,7 +730,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		(mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
 	if (!after_bootmem)
-		find_early_table_space(end);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		last_map_addr = kernel_physical_mapping_init(