author     Jeremy Fitzhardinge <jeremy@goop.org>      2008-03-17 19:37:14 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-04-24 17:57:31 -0400
commit     85958b465c2e0de315575b1d3d7e7c2ce7126880 (patch)
tree       c1d6a062bf74b8e172757f4f4259ac567043b8a1
parent     68db065c845bd9d0eb96946ab104b4c82d0ae9da (diff)
x86: unify pgd ctor/dtor
All pagetables need fundamentally the same setup and destruction, so
just use the same code for everything.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/mm/pgtable.c         | 59
-rw-r--r--  include/asm-x86/pgtable.h     | 16
-rw-r--r--  include/asm-x86/pgtable_32.h  | 15
-rw-r--r--  include/asm-x86/pgtable_64.h  |  2
4 files changed, 30 insertions(+), 62 deletions(-)
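Before the per-file diffs, it may help to see the shape of the unified lifecycle this patch converges on. The following is a simplified sketch, not the literal post-patch source: PAE pmd prepopulation, paravirt hooks, and error-handling details are omitted, and the GFP flags shown are an assumption.

    /* Sketch: one pgd allocation path for both 32- and 64-bit. */
    pgd_t *pgd_alloc(struct mm_struct *mm)
    {
            pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

            if (pgd)
                    pgd_ctor(pgd);  /* clone kernel entries from swapper_pg_dir,
                                       link onto pgd_list under pgd_lock */
            return pgd;
    }

    void pgd_free(struct mm_struct *mm, pgd_t *pgd)
    {
            pgd_dtor(pgd);          /* unlink from pgd_list under pgd_lock */
            free_page((unsigned long)pgd);
    }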
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e2ac320e6151..50159764f694 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -59,50 +59,6 @@ static inline void pgd_list_del(pgd_t *pgd)
 	list_del(&page->lru);
 }
 
-#ifdef CONFIG_X86_64
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	unsigned boundary;
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	unsigned long flags;
-	if (!pgd)
-		return NULL;
-	spin_lock_irqsave(&pgd_lock, flags);
-	pgd_list_add(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-	/*
-	 * Copy kernel pointers in from init.
-	 * Could keep a freelist or slab cache of those because the kernel
-	 * part never changes.
-	 */
-	boundary = pgd_index(__PAGE_OFFSET);
-	memset(pgd, 0, boundary * sizeof(pgd_t));
-	memcpy(pgd + boundary,
-	       init_level4_pgt + boundary,
-	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-	return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	unsigned long flags;
-	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-	spin_lock_irqsave(&pgd_lock, flags);
-	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-	free_page((unsigned long)pgd);
-}
-#else
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
 #define UNSHARED_PTRS_PER_PGD \
 	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
@@ -120,7 +76,8 @@ static void pgd_ctor(void *p)
 	   ptes in non-PAE, or shared PMD in PAE), then just copy the
 	   references from swapper_pg_dir. */
 	if (PAGETABLE_LEVELS == 2 ||
-	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+	    PAGETABLE_LEVELS == 4) {
 		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 				KERNEL_PGD_PTRS);
@@ -149,6 +106,17 @@ static void pgd_dtor(void *pgd)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * -- wli
+ */
+
 #ifdef CONFIG_X86_PAE
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
@@ -264,7 +232,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	pgd_dtor(pgd);
 	free_page((unsigned long)pgd);
 }
-#endif
 
 int ptep_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pte_t *ptep,
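The deleted 64-bit pgd_alloc() copied the kernel half of the pgd by hand; the common pgd_ctor() now does the same copy through clone_pgd_range(). Assuming KERNEL_PGD_BOUNDARY expands to pgd_index(__PAGE_OFFSET) and KERNEL_PGD_PTRS to PTRS_PER_PGD - KERNEL_PGD_BOUNDARY (per the parent commit), and that swapper_pg_dir aliases init_level4_pgt in the 64-bit headers of this era, the two forms move the same bytes:

    /* Old, open-coded in the removed 64-bit pgd_alloc(): */
    boundary = pgd_index(__PAGE_OFFSET);
    memcpy(pgd + boundary, init_level4_pgt + boundary,
           (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

    /* New, via the common ctor: */
    clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                    swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                    KERNEL_PGD_PTRS);

The old explicit memset() of the user half has no direct counterpart in the ctor; presumably the common allocation path hands pgd_ctor() an already-zeroed page.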
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index e61075e70a54..b8a08bd7bd48 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -438,6 +438,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
 	pte_update(mm, addr, ptep);
 }
 
+/*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ *  dst - pointer to pgd range anwhere on a pgd page
+ *  src - ""
+ *  count - the number of pgds to copy.
+ *
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+	memcpy(dst, src, count * sizeof(pgd_t));
+}
+
+
 #include <asm-generic/pgtable.h>
 #endif /* __ASSEMBLY__ */
 
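With clone_pgd_range() now in the shared header, any caller must respect its no-overlap, single-page contract. A hypothetical call with illustrative 64-bit values (KERNEL_PGD_BOUNDARY = 256 and PTRS_PER_PGD = 512 are examples, not values taken from this tree):

    /* Copies pgd slots 256..511 within one 4K pgd page:
     * 256 entries * 8 bytes = 2048 bytes, starting at byte offset 2048,
     * so the copy ends exactly at the page boundary and never crosses it. */
    clone_pgd_range(pgd + 256, swapper_pg_dir + 256, 256);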
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index cc52da32fbe2..168b6447cf18 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -106,21 +106,6 @@ extern int pmd_bad(pmd_t pmd);
 #endif
 
 /*
- * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
- *
- *  dst - pointer to pgd range anwhere on a pgd page
- *  src - ""
- *  count - the number of pgds to copy.
- *
- * dst and src can be on the same page, but the range must not overlap,
- * and must not cross a page boundary.
- */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-{
-	memcpy(dst, src, count * sizeof(pgd_t));
-}
-
-/*
  * Macro to mark a page protection value as "uncacheable".
  * On processors which do not support it, this is a no-op.
  */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9fd87d0b6477..a3bbf8766c1d 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -24,7 +24,7 @@ extern void paging_init(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#define SHARED_KERNEL_PMD 1
+#define SHARED_KERNEL_PMD 0
 
 /*
 * PGDIR_SHIFT determines what a top-level page table entry can map
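Flipping SHARED_KERNEL_PMD from 1 to 0 on 64-bit is what routes x86-64 through the common ctor/dtor bookkeeping: UNSHARED_PTRS_PER_PGD becomes PTRS_PER_PGD, and every 64-bit pgd lands on pgd_list, preserving the pgd_list_add()/pgd_list_del() calls of the deleted 64-bit pgd_alloc()/pgd_free(). A sketch of the resulting ctor behavior; the clone condition is quoted from the hunk above, while the pgd_list_add() placement is an assumption consistent with the deleted 64-bit code:

    /* Hypothetical condensed view of pgd_ctor() after this patch. */
    static void pgd_ctor_sketch(pgd_t *pgd)
    {
            if (PAGETABLE_LEVELS == 2 ||
                (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
                PAGETABLE_LEVELS == 4)          /* 64-bit takes this arm */
                    clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                    swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                    KERNEL_PGD_PTRS);

            if (!SHARED_KERNEL_PMD)             /* now true on 64-bit too */
                    pgd_list_add(pgd);          /* under pgd_lock */
    }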