author		Yinghai Lu <yinghai@kernel.org>		2012-11-16 22:39:03 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 14:59:26 -0500
commit		6f80b68e9e515547edbacb0c37491730bf766db5 (patch)
tree		40d5f129bff2042d53354c0aa5b7b748265a3d13 /arch
parent		9985b4c6fa7d660f685918a58282275e9e35d8e0 (diff)
x86, mm, Xen: Remove mapping_pagetable_reserve()
The page table area is now pre-mapped, after the commits

  x86, mm: setup page table in top-down
  x86, mm: Remove early_memremap workaround for page table accessing on 64bit

so mapping_pagetable_reserve() is not used anymore; remove it.

Also remove the write-protect logic in mask_rw_pte(): the modified
alloc_low_page() always returns pages that are already mapped, and
xen_alloc_pte_init(), xen_alloc_pmd_init(), etc. mark the page RO
before hooking it into the pagetable automatically.

-v2: add changelog about mask_rw_pte() from Stefano.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-27-git-send-email-yinghai@kernel.org
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
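[Editor's note] Why the mask_rw_pte() logic is safe to drop: the Xen allocator
hooks named in the changelog already write-protect a page-table page before it
is hooked into the page tables. A minimal sketch of that path, paraphrased from
the Xen MMU code of this period (the exact body in mmu.c differs in details):

/*
 * Paraphrased sketch of xen_alloc_pte_init(): mark the freshly
 * allocated page-table page read-only in the kernel linear mapping,
 * then ask the hypervisor to pin it as an L1 page table.  Because
 * this happens for every page hooked into the page tables, the
 * pgt_buf range check that mask_rw_pte() used to do is redundant.
 */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}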
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/include/asm/pgtable_types.h |  1 -
 -rw-r--r--  arch/x86/include/asm/x86_init.h      | 12 ------------
 -rw-r--r--  arch/x86/kernel/x86_init.c           |  4 ----
 -rw-r--r--  arch/x86/mm/init.c                   |  4 ----
 -rw-r--r--  arch/x86/xen/mmu.c                   | 28 ----------------------------
 5 files changed, 0 insertions(+), 49 deletions(-)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index ec8a1fc9505d..79738f20aaf5 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -301,7 +301,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
 
-extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_init(void);
 #else
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 57693498519c..3b2ce8fc995a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -69,17 +69,6 @@ struct x86_init_oem {
 };
 
 /**
- * struct x86_init_mapping - platform specific initial kernel pagetable setup
- * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
- *
- * For more details on the purpose of this hook, look in
- * init_memory_mapping and the commit that added it.
- */
-struct x86_init_mapping {
-	void (*pagetable_reserve)(u64 start, u64 end);
-};
-
-/**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_init: platform specific paging initialization call to setup
  *		the kernel pagetables and prepare accessors functions.
@@ -136,7 +125,6 @@ struct x86_init_ops {
 	struct x86_init_mpparse mpparse;
 	struct x86_init_irqs irqs;
 	struct x86_init_oem oem;
-	struct x86_init_mapping mapping;
 	struct x86_init_paging paging;
 	struct x86_init_timers timers;
 	struct x86_init_iommu iommu;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 7a3d075a814a..50cf83ecd32e 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -62,10 +62,6 @@ struct x86_init_ops x86_init __initdata = {
 		.banner = default_banner,
 	},
 
-	.mapping = {
-		.pagetable_reserve = native_pagetable_reserve,
-	},
-
 	.paging = {
 		.pagetable_init = native_pagetable_init,
 	},
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6392bf9a3947..21173fcdb4a1 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -112,10 +112,6 @@ static void __init probe_page_size_mask(void)
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
 }
-void __init native_pagetable_reserve(u64 start, u64 end)
-{
-	memblock_reserve(start, end - start);
-}
 
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
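[Editor's note] For context: before this series, init_memory_mapping() was the
hook's only caller, reserving the portion of the pre-allocated page-table
buffer that was actually consumed. A reconstruction of that already-removed
call site, for illustration only:

	/* Old caller, removed earlier in this series (reconstructed):
	 * reserve the pgt_buf pages [pgt_buf_start, pgt_buf_end) that
	 * the direct-mapping setup actually used.
	 */
	x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
					   PFN_PHYS(pgt_buf_end));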
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dcf5f2dd91ec..bbb883f58bc4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1178,20 +1178,6 @@ static void xen_exit_mmap(struct mm_struct *mm)
 
 static void xen_post_allocator_init(void);
 
-static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
-{
-	/* reserve the range used */
-	native_pagetable_reserve(start, end);
-
-	/* set as RW the rest */
-	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
-			PFN_PHYS(pgt_buf_top));
-	while (end < PFN_PHYS(pgt_buf_top)) {
-		make_lowmem_page_readwrite(__va(end));
-		end += PAGE_SIZE;
-	}
-}
-
 #ifdef CONFIG_X86_64
 static void __init xen_cleanhighmap(unsigned long vaddr,
 				    unsigned long vaddr_end)
@@ -1503,19 +1489,6 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 #else /* CONFIG_X86_64 */
 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-	/*
-	 * If the new pfn is within the range of the newly allocated
-	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot as a freshly allocated page, make sure
-	 * it is RO.
-	 */
-	if (((!is_early_ioremap_ptep(ptep) &&
-	      pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
-	    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
-		pte = pte_wrprotect(pte);
-
 	return pte;
 }
 #endif /* CONFIG_X86_64 */
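[Editor's note] After this patch the 32-bit mask_rw_pte() reduces to a
pass-through, matching the 64-bit variant; as the surviving context lines
above show, the whole function is now:

static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* Pages from alloc_low_page() are already mapped, and the Xen
	 * allocator hooks mark page-table pages RO before pinning them,
	 * so no write-protection is needed here anymore.
	 */
	return pte;
}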
@@ -2197,7 +2170,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 void __init xen_init_mmu_ops(void)
 {
-	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
 	x86_init.paging.pagetable_init = xen_pagetable_init;
 	pv_mmu_ops = xen_mmu_ops;
 