Diffstat (limited to 'arch/x86/mm/init_32.c'):
 arch/x86/mm/init_32.c | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ee1091a46964..1500dc8d63e4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -51,6 +51,8 @@
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
+unsigned long max_pfn_mapped;
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long highstart_pfn, highend_pfn;
 
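
The new max_pfn_mapped records how far the kernel direct mapping extends: every pfn below it is already mapped. A minimal userspace sketch of that invariant, assuming a hypothetical pfn_is_mapped() helper that is not part of this patch:

#include <stdio.h>

/* Sketch only, not kernel code: max_pfn_mapped marks the end of the
 * direct mapping, so "mapped" is simply "below the watermark". The
 * pfn_is_mapped() helper is hypothetical. */
static unsigned long max_pfn_mapped = 4096;     /* e.g. 16 MiB already mapped */

static int pfn_is_mapped(unsigned long pfn)
{
        return pfn < max_pfn_mapped;
}

int main(void)
{
        printf("pfn 1000 mapped: %d\n", pfn_is_mapped(1000));   /* prints 1 */
        printf("pfn 8192 mapped: %d\n", pfn_is_mapped(8192));   /* prints 0 */
        return 0;
}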
@@ -179,8 +181,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 			/*
 			 * Map with big pages if possible, otherwise
 			 * create normal page tables:
+			 *
+			 * Don't use a large page for the first 2/4MB of memory
+			 * because there are often fixed size MTRRs in there
+			 * and overlapping MTRRs into large pages can cause
+			 * slowdowns.
 			 */
-			if (cpu_has_pse) {
+			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
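
The pgd_idx == 0 && pmd_idx == 0 test picks out only the very first large page, which is where the fixed-range MTRRs sit. A quick sketch of why that page is 4MB without PAE and 2MB with it (the constants below are the usual i386 values, stated here as assumptions):

#include <stdio.h>

int main(void)
{
        /* Assumed i386 values: 4 KiB pages; 1024 PTEs per page table
         * without PAE, 512 with PAE. */
        unsigned long page_size = 4096;
        unsigned long ptrs_nopae = 1024, ptrs_pae = 512;

        /* One PSE large page replaces a full page table's worth of
         * 4 KiB pages -- the "2/4MB" from the comment above. */
        printf("large page, no PAE: %lu MiB\n",
               (ptrs_nopae * page_size) >> 20);  /* 4 */
        printf("large page, PAE:    %lu MiB\n",
               (ptrs_pae * page_size) >> 20);    /* 2 */
        return 0;
}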
@@ -194,6 +201,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 				set_pmd(pmd, pfn_pmd(pfn, prot));
 
 				pfn += PTRS_PER_PTE;
+				max_pfn_mapped = pfn;
 				continue;
 			}
 			pte = one_page_table_init(pmd);
@@ -208,6 +216,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
 				set_pte(pte, pfn_pte(pfn, prot));
 			}
+			max_pfn_mapped = pfn;
 		}
 	}
 }
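
Together, the two hunks above keep max_pfn_mapped current on both exit paths of the loop body: after a large-page mapping and after a run of 4 KiB PTEs. A condensed userspace sketch of that control flow (set_pmd()/set_pte() and the real index bookkeeping are elided; the pfn != 0 test stands in for pgd_idx == 0 && pmd_idx == 0):

#include <stdio.h>

static unsigned long max_pfn_mapped;

int main(void)
{
        unsigned long ptrs_per_pte = 1024;      /* assumed non-PAE value */
        unsigned long max_low_pfn = 4096;       /* 16 MiB, just as an example */
        unsigned long pfn = 0, i;
        int cpu_has_pse = 1;

        while (pfn < max_low_pfn) {
                if (cpu_has_pse && pfn != 0) {  /* skip the first 2/4MB */
                        /* set_pmd() with a large page elided */
                        pfn += ptrs_per_pte;
                        max_pfn_mapped = pfn;   /* first update site */
                        continue;
                }
                for (i = 0; i < ptrs_per_pte && pfn < max_low_pfn; i++, pfn++)
                        ;                       /* set_pte() per page elided */
                max_pfn_mapped = pfn;           /* second update site */
        }
        printf("max_pfn_mapped = %lu\n", max_pfn_mapped);       /* 4096 */
        return 0;
}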
@@ -723,25 +732,17 @@ void mark_rodata_ro(void)
 	unsigned long start = PFN_ALIGN(_text);
 	unsigned long size = PFN_ALIGN(_etext) - start;
 
-#ifndef CONFIG_KPROBES
-#ifdef CONFIG_HOTPLUG_CPU
-	/* It must still be possible to apply SMP alternatives. */
-	if (num_possible_cpus() <= 1)
-#endif
-	{
-		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
-			size >> 10);
+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+		size >> 10);
 
 #ifdef CONFIG_CPA_DEBUG
-		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
-			start, start+size);
-		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
+	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+		start, start+size);
+	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
 
-		printk(KERN_INFO "Testing CPA: write protecting again\n");
-		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-#endif
-	}
+	printk(KERN_INFO "Testing CPA: write protecting again\n");
+	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
 	start += size;
 	size = (unsigned long)__end_rodata - start;
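
With the CONFIG_KPROBES/CONFIG_HOTPLUG_CPU guards removed, the kernel text is write-protected unconditionally, and the CONFIG_CPA_DEBUG block then flips the range back to rw and ro again as a self-test. A rough userspace analogue of that cycle using mprotect(), offered only as an analogy for set_pages_ro()/set_pages_rw(), not as the kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "text");
        mprotect(p, psz, PROT_READ);            /* ~ set_pages_ro() */
        printf("read-only, still readable: %s\n", p);

        /* The CPA_DEBUG self-test: make the range writable again,
         * then re-protect it, exercising the attribute-change path. */
        mprotect(p, psz, PROT_READ | PROT_WRITE);       /* ~ set_pages_rw() */
        p[0] = 'T';
        mprotect(p, psz, PROT_READ);            /* ~ set_pages_ro() */
        printf("after rw/ro cycle: %s\n", p);

        munmap(p, psz);
        return 0;
}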