author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-03-17 19:37:14 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-24 17:57:31 -0400
commit	85958b465c2e0de315575b1d3d7e7c2ce7126880 (patch)
tree	c1d6a062bf74b8e172757f4f4259ac567043b8a1 /arch/x86/mm
parent	68db065c845bd9d0eb96946ab104b4c82d0ae9da (diff)
x86: unify pgd ctor/dtor
All pagetables need fundamentally the same setup and destruction, so
just use the same code for everything.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
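The conversion is easiest to see in outline: after this patch every paging mode allocates a pgd page the same way and funnels through the one shared constructor/destructor pair. The sketch below is an illustrative reconstruction of that common path, not the verbatim post-patch source; the _sketch names are hypothetical stand-ins for the real functions in arch/x86/mm/pgtable.c, and PAE's extra pmd bookkeeping is deliberately elided.

	/*
	 * Illustrative reconstruction (not verbatim kernel source): the
	 * unified allocation path all configurations share after this patch.
	 */
	pgd_t *pgd_alloc_sketch(struct mm_struct *mm)
	{
		pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

		if (pgd)
			pgd_ctor(pgd);	/* copy kernel references, join pgd_list */
		return pgd;
	}

	void pgd_free_sketch(struct mm_struct *mm, pgd_t *pgd)
	{
		pgd_dtor(pgd);		/* leave pgd_list under pgd_lock */
		free_page((unsigned long)pgd);
	}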
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pgtable.c	59
1 file changed, 13 insertions(+), 46 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e2ac320e6151..50159764f694 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -59,50 +59,6 @@ static inline void pgd_list_del(pgd_t *pgd)
 	list_del(&page->lru);
 }
 
-#ifdef CONFIG_X86_64
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	unsigned boundary;
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	unsigned long flags;
-	if (!pgd)
-		return NULL;
-	spin_lock_irqsave(&pgd_lock, flags);
-	pgd_list_add(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-	/*
-	 * Copy kernel pointers in from init.
-	 * Could keep a freelist or slab cache of those because the kernel
-	 * part never changes.
-	 */
-	boundary = pgd_index(__PAGE_OFFSET);
-	memset(pgd, 0, boundary * sizeof(pgd_t));
-	memcpy(pgd + boundary,
-	       init_level4_pgt + boundary,
-	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-	return pgd;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	unsigned long flags;
-	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-	spin_lock_irqsave(&pgd_lock, flags);
-	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-	free_page((unsigned long)pgd);
-}
-#else
-/*
- * List of all pgd's needed for non-PAE so it can invalidate entries
- * in both cached and uncached pgd's; not needed for PAE since the
- * kernel pmd is shared. If PAE were not to share the pmd a similar
- * tactic would be needed. This is essentially codepath-based locking
- * against pageattr.c; it is the unique case in which a valid change
- * of kernel pagetables can't be lazily synchronized by vmalloc faults.
- * vmalloc faults work because attached pagetables are never freed.
- * -- wli
- */
 #define UNSHARED_PTRS_PER_PGD				\
 	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
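Worth noting about the deletion above: the open-coded memset()/memcpy() pair that seeded the 64-bit pgd from init_level4_pgt duplicated what the shared constructor already does through clone_pgd_range(). In this era of the tree clone_pgd_range() is, to a first approximation, a typed memcpy() over pgd slots; a rough sketch (the _sketch name is hypothetical, the real helper lives in asm/pgtable.h):

	/*
	 * Rough shape of clone_pgd_range(): copy 'count' kernel pgd
	 * references from src into dst. This is what makes the removed
	 * 64-bit memcpy() above redundant.
	 */
	static inline void clone_pgd_range_sketch(pgd_t *dst, pgd_t *src, int count)
	{
		memcpy(dst, src, count * sizeof(pgd_t));
	}

So dropping the whole CONFIG_X86_64 branch loses no behaviour, only a redundant copy and a second locking path.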
@@ -120,7 +76,8 @@ static void pgd_ctor(void *p)
 	   ptes in non-PAE, or shared PMD in PAE), then just copy the
 	   references from swapper_pg_dir. */
 	if (PAGETABLE_LEVELS == 2 ||
-	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+	    PAGETABLE_LEVELS == 4) {
 		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 				KERNEL_PGD_PTRS);
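The one functional change is this widened predicate: reference-copying the kernel entries from swapper_pg_dir is valid whenever the copied slots point at structures shared by all processes. Restated as a hypothetical helper (the kernel keeps this inline in the if condition; the _sketch name is mine):

	/* Hypothetical restatement of the condition above. */
	static inline int kernel_pgd_is_shared_sketch(void)
	{
		return PAGETABLE_LEVELS == 2 ||		/* !PAE: pgd entries hold ptes */
		       (PAGETABLE_LEVELS == 3 &&
			SHARED_KERNEL_PMD) ||		/* PAE with a shared kernel pmd */
		       PAGETABLE_LEVELS == 4;		/* x86-64: kernel puds are shared */
	}

Admitting the 4-level case is exactly what lets x86-64 use the common pgd_ctor() instead of its private allocator deleted in the first hunk.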
@@ -149,6 +106,17 @@ static void pgd_dtor(void *pgd)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * -- wli
+ */
+
 #ifdef CONFIG_X86_PAE
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
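The wli comment relocated above documents why pgd_list exists at all: pageattr.c must be able to push a kernel pagetable change into every cached pgd, since that is the one change vmalloc faults cannot lazily repair. A hypothetical sketch of that walk, modelled on the vmalloc-sync pattern of this era (not code from this patch; the _sketch name is mine):

	/*
	 * Hypothetical sketch of the pgd_list walk the comment describes:
	 * propagate one kernel mapping change into every cached pgd,
	 * serialized against allocation/free by pgd_lock.
	 */
	static void sync_one_kernel_pgd_sketch(unsigned long address)
	{
		unsigned index = pgd_index(address);
		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd = (pgd_t *)page_address(page);

			set_pgd(pgd + index, swapper_pg_dir[index]);
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}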
@@ -264,7 +232,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	pgd_dtor(pgd);
 	free_page((unsigned long)pgd);
 }
-#endif
 
 int ptep_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pte_t *ptep,