author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-01-30 07:34:11 -0500
committer  Ingo Molnar <mingo@elte.hu>              2008-01-30 07:34:11 -0500
commit     e3ed910db221768f8fd6192b13373e17d61bcdf0
tree       67a5eed4709a4b1b38e952af5bfc0340f78703af /arch
parent     fa28ba21cec24d3fa1279bcae7e5d5ff6224635a
x86: use the same pgd_list for PAE and 64-bit
Use a standard list threaded through page->lru for maintaining the pgd
list on PAE. This is the same as 64-bit, and seems saner than using a
non-standard list via page->index.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
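The change rests on the kernel's standard intrusive list: a struct list_head node is embedded in the object itself (here, page->lru), so the same entry can be linked, unlinked and iterated with the generic helpers from <linux/list.h> instead of ad-hoc pointer fields. Below is a minimal userspace sketch of that pattern; the list primitives mirror the kernel API, but pgd_page and the main() driver are invented stand-ins for struct page and the real pgd allocation paths, not code from this patch.

/*
 * Minimal userspace sketch of the intrusive list_head pattern.
 * The primitives mirror <linux/list.h>; pgd_page is a hypothetical
 * stand-in for struct page with an embedded node, like page->lru.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* A list head that starts out pointing at itself (empty list). */
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

/* Insert 'new' right after 'head' (stack-like, as the kernel's list_add). */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Unlink an entry; the entry alone is enough, no head or back-pointer needed. */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Recover the enclosing structure from its embedded node. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/* Hypothetical stand-in for struct page. */
struct pgd_page {
	int id;
	struct list_head lru;
};

static LIST_HEAD(pgd_list);

int main(void)
{
	struct pgd_page a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct pgd_page *p;

	list_add(&a.lru, &pgd_list);
	list_add(&b.lru, &pgd_list);
	list_add(&c.lru, &pgd_list);

	list_del(&b.lru);			/* O(1), no bookkeeping elsewhere */

	list_for_each_entry(p, &pgd_list, lru)
		printf("pgd page %d\n", p->id);	/* prints 3, then 1 */

	return 0;
}

Because the node is embedded and doubly linked, list_del() needs nothing but the entry itself, which is exactly what lets pgd_list_del() in the patch shrink to a single call.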
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/mm/fault.c       10
-rw-r--r--  arch/x86/mm/pageattr.c     2
-rw-r--r--  arch/x86/mm/pgtable_32.c  19
3 files changed, 9 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 72547a7e32c6..e28cc5277b16 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -907,10 +907,8 @@ do_sigbus:
 	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
 }
 
-#ifdef CONFIG_X86_64
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
-#endif
 
 void vmalloc_sync_all(void)
 {
@@ -935,13 +933,11 @@ void vmalloc_sync_all(void)
 		struct page *page;
 
 		spin_lock_irqsave(&pgd_lock, flags);
-		for (page = pgd_list; page; page =
-			     (struct page *)page->index)
+		list_for_each_entry(page, &pgd_list, lru) {
 			if (!vmalloc_sync_one(page_address(page),
-					      address)) {
-				BUG_ON(page != pgd_list);
+					      address))
 				break;
-			}
+		}
 		spin_unlock_irqrestore(&pgd_lock, flags);
 		if (!page)
 			set_bit(pgd_index(address), insync);
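One thing this conversion does not change is the locking around the walk: vmalloc_sync_all() still takes pgd_lock with spin_lock_irqsave() before touching pgd_list, since pgd pages are added to and removed from the list as address spaces are created and torn down. The sketch below is a rough userspace analogue of that lock-protected walk-with-early-exit shape, assuming a pthread mutex in place of the IRQ-safe spinlock and an invented pgd_entry/sync_one() pair in place of struct page and vmalloc_sync_one().

/* Userspace analogue of the locked walk in vmalloc_sync_all().
 * pthread_mutex_t stands in for the pgd_lock spinlock; pgd_entry
 * and sync_one() are invented placeholders, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pgd_entry {
	int id;
	bool synced;
};

static pthread_mutex_t pgd_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pgd_entry pgds[3] = { { 1, true }, { 2, false }, { 3, true } };

/* Placeholder for vmalloc_sync_one(): report whether this entry is in sync. */
static bool sync_one(struct pgd_entry *e)
{
	return e->synced;
}

int main(void)
{
	struct pgd_entry *failed = NULL;

	pthread_mutex_lock(&pgd_lock);		/* spin_lock_irqsave(&pgd_lock, flags) */
	for (size_t i = 0; i < sizeof(pgds) / sizeof(pgds[0]); i++) {
		if (!sync_one(&pgds[i])) {
			failed = &pgds[i];	/* mirrors leaving 'page' set on break */
			break;
		}
	}
	pthread_mutex_unlock(&pgd_lock);	/* spin_unlock_irqrestore(&pgd_lock, flags) */

	if (!failed)
		printf("all pgds in sync\n");
	else
		printf("pgd %d not synced yet\n", failed->id);	/* prints pgd 2 */

	return 0;
}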
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ec07c1873d65..1cc6607eacb0 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -175,7 +175,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	if (!SHARED_KERNEL_PMD) {
 		struct page *page;
 
-		for (page = pgd_list; page; page = (struct page *)page->index) {
+		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 5ca3552474ae..2ae5999a795a 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -205,27 +205,18 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
-
 static inline void pgd_list_add(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
-	page->index = (unsigned long)pgd_list;
-	if (pgd_list)
-		set_page_private(pgd_list, (unsigned long)&page->index);
-	pgd_list = page;
-	set_page_private(page, (unsigned long)&pgd_list);
+
+	list_add(&page->lru, &pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-	struct page *next, **pprev, *page = virt_to_page(pgd);
-	next = (struct page *)page->index;
-	pprev = (struct page **)page_private(page);
-	*pprev = next;
-	if (next)
-		set_page_private(next, (unsigned long)pprev);
+	struct page *page = virt_to_page(pgd);
+
+	list_del(&page->lru);
 }
 
 
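This last hunk is where the "non-standard list via page->index" from the commit message actually lived: page->index doubled as the next pointer, and page_private() held the address of the previous element's next slot (or of the pgd_list head itself) so a page could be unlinked without walking the chain. A hedged userspace reconstruction of that bookkeeping follows; old_page, old_pgd_list_add() and old_pgd_list_del() are invented names, and the two link fields are shown as real pointers where the kernel stored them as unsigned long in page->index and page_private().

/* Reconstruction of the removed scheme, as a standalone sketch. */
#include <stdio.h>

struct old_page {
	struct old_page *index;		/* next entry (kernel: stuffed into page->index) */
	struct old_page **private;	/* &previous->index, or &old_pgd_list (kernel: page_private()) */
	int id;
};

static struct old_page *old_pgd_list;	/* bare NULL-terminated head pointer */

static void old_pgd_list_add(struct old_page *page)
{
	page->index = old_pgd_list;
	if (old_pgd_list)
		old_pgd_list->private = &page->index;	/* old head's back-pointer */
	old_pgd_list = page;
	page->private = &old_pgd_list;			/* new head points back at the head variable */
}

static void old_pgd_list_del(struct old_page *page)
{
	struct old_page *next = page->index;
	struct old_page **pprev = page->private;

	*pprev = next;			/* the previous 'next' slot now skips this page */
	if (next)
		next->private = pprev;	/* and the successor's back-pointer is repaired */
}

int main(void)
{
	struct old_page a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	old_pgd_list_add(&a);
	old_pgd_list_add(&b);
	old_pgd_list_add(&c);			/* chain: c -> b -> a */

	old_pgd_list_del(&b);			/* works, but only via the private/pprev trick */

	for (struct old_page *p = old_pgd_list; p; p = p->index)
		printf("pgd page %d\n", p->id);	/* prints 3, then 1 */

	return 0;
}

The list_head version in the patch keeps the same O(1) deletion but gets the back link for free from the doubly linked node, which is the whole simplification.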