diff options
author     Ian Campbell <ian.campbell@citrix.com>   2010-02-26 12:16:02 -0500
committer  H. Peter Anvin <hpa@zytor.com>           2010-02-27 17:41:35 -0500
commit     dad52fc01161afcb8798c609e009aed4d104927f (patch)
tree       6d31060cc3e484898c3f503204cdfc52cb5d4e03 /arch
parent     3249b7e1df6380e9d7bb3238f64f445bf614f787 (diff)
x86, paravirt: Remove kmap_atomic_pte paravirt op.
Now that both Xen and VMI disable allocations of PTE pages from high
memory this paravirt op serves no further purpose.
This effectively reverts ce6234b5 "add kmap_atomic_pte for mapping
highpte pages".
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
LKML-Reference: <1267204562-11844-3-git-send-email-ian.campbell@citrix.com>
Acked-by: Alok Kataria <akataria@vmware.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/highmem.h         |  4
-rw-r--r--  arch/x86/include/asm/paravirt.h        |  9
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  4
-rw-r--r--  arch/x86/include/asm/pgtable_32.h      |  4
-rw-r--r--  arch/x86/kernel/paravirt.c             |  4
-rw-r--r--  arch/x86/kernel/vmi_32.c               | 20
-rw-r--r--  arch/x86/xen/mmu.c                     | 22
7 files changed, 2 insertions, 65 deletions
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 014c2b85ae45..a726650fc80f 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
-#endif
-
 #define flush_cache_kmaps()	do { } while (0)
 
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dd59a85a918f..5653f43d90e5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	unsigned long ret;
-	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
-	return (void *)ret;
-}
-#endif
-
 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b1e70d51e40c..db9ef5532341 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@ struct pv_mmu_ops {
 #endif	/* PAGETABLE_LEVELS == 4 */
 #endif	/* PAGETABLE_LEVELS >= 3 */
 
-#ifdef CONFIG_HIGHPTE
-	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
 	struct pv_lazy_ops lazy_mode;
 
 	/* dom0 ops */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 01fd9461d323..b422d2201af3 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 	 in_irq() ? KM_IRQ_PTE :	\
 	 KM_PTE0)
 #define pte_offset_map(dir, address)				\
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) +	\
+	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +	\
 	 pte_index((address)))
 #define pte_offset_map_nested(dir, address)			\
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +	\
+	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +	\
 	 pte_index((address)))
 #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
 #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b1739d16310..1db183ed7c01 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = kmap_atomic,
-#endif
-
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 58aca86193e5..7dd599deca4a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -267,22 +267,6 @@ static void vmi_nop(void)
 {
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	void *va = kmap_atomic(page, type);
-
-	/*
-	 * We disable highmem allocations for page tables so we should never
-	 * see any calls to kmap_atomic_pte on a highmem page.
-	 */
-
-	BUG_ON(PageHighMem(page));
-
-	return va;
-}
-#endif
-
 static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
 {
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -777,10 +761,6 @@ static inline int __init activate_vmi(void)
 
 	/* Set linear is needed in all cases */
 	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
-	if (vmi_ops.set_linear_mapping)
-		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
 
 	/*
 	 * These MUST always be patched. Don't support indirect jumps
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 350a3deedf25..f9eb7de74f42 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,24 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	pgprot_t prot = PAGE_KERNEL;
-
-	/*
-	 * We disable highmem allocations for page tables so we should never
-	 * see any calls to kmap_atomic_pte on a highmem page.
-	 */
-	BUG_ON(PageHighMem(page));
-
-	if (PagePinned(page))
-		prot = PAGE_KERNEL_RO;
-
-	return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
 #ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
@@ -1903,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.alloc_pmd_clone = paravirt_nop,
 	.release_pmd = xen_release_pmd_init,
 
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
 #ifdef CONFIG_X86_64
 	.set_pte = xen_set_pte,
 #else