| author | Jan Beulich <jbeulich@novell.com> | 2008-08-29 07:53:45 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-06 13:48:01 -0400 |
| commit | cc643d4687533345fd8ebcba836f9ee25df7c458 | |
| tree | d19fbbb173681a9b8ded800edea3d0002036c7d1 /arch/x86/mm/fault.c | |
| parent | 913da64b54b2b3bb212a59aba2e6f2b8294ca1fa | |
x86: adjust vmalloc_sync_all() for Xen (2nd try)
Since the fourth PDPT entry cannot be shared under Xen,
vmalloc_sync_all() must iterate over PMDs rather than PGDs here.
Luckily, the code isn't used for native PAE (where SHARED_KERNEL_PMD
is 1), and the change is benign to non-PAE, where the PMD level is
folded and PMD_SIZE equals PGDIR_SIZE.
Also do a little more cleanup in that function.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
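As an aside on the granularity argument in the message above: under PAE a PDPT (pgd-level) entry maps 1 GiB while a pmd entry maps 2 MiB, whereas without PAE the pmd level is folded and both map the same 4 MiB, which is why stepping by PMD_SIZE changes nothing there. A minimal standalone sketch of that arithmetic (the shift constants mirror the usual 32-bit x86 values but are hard-coded assumptions here, not taken from kernel headers):

```c
/*
 * Illustration only: why stepping by PMD_SIZE matters under Xen/PAE
 * yet is benign without PAE. Shift values below mirror the usual
 * 32-bit x86 layouts but are hard-coded, not from kernel headers.
 */
#include <stdio.h>

#define PAE_PGDIR_SHIFT   30	/* PAE: one PDPT (pgd) entry maps 1 GiB */
#define PAE_PMD_SHIFT     21	/* PAE: one pmd entry maps 2 MiB */
#define NOPAE_PGDIR_SHIFT 22	/* non-PAE: pmd level folded, 4 MiB */

int main(void)
{
	/* Under Xen/PAE the 4th PDPT entry is per-pgd, so syncing must
	 * visit each 2 MiB pmd slot, not whole 1 GiB pgd slots. */
	printf("PAE:     PGDIR_SIZE = %4lu MiB, PMD_SIZE = %lu MiB\n",
	       (1UL << PAE_PGDIR_SHIFT) >> 20,
	       (1UL << PAE_PMD_SHIFT) >> 20);

	/* Without PAE, PMD_SIZE == PGDIR_SIZE, so iterating by
	 * PMD_SIZE walks exactly the same addresses as before. */
	printf("non-PAE: PGDIR_SIZE = PMD_SIZE = %lu MiB\n",
	       (1UL << NOPAE_PGDIR_SHIFT) >> 20);
	return 0;
}
```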
Diffstat (limited to 'arch/x86/mm/fault.c')
arch/x86/mm/fault.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
```diff
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 455f3fe67b42..356ed2dec3a6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -915,15 +915,15 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PMD_MASK;
+	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address += PMD_SIZE) {
 		unsigned long flags;
 		struct page *page;
 
@@ -936,10 +936,8 @@ void vmalloc_sync_all(void)
 		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
-	unsigned long address;
-
-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		unsigned long flags;
 		struct page *page;
```
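To see how the reworked 32-bit loop bounds behave, here is a small standalone sketch that re-creates just the loop header from the first hunk; TASK_SIZE, VMALLOC_START, and FIXADDR_TOP below are hypothetical example values for a 3G/1G split, not the kernel's actual constants, and the per-PMD sync body is reduced to a counter:

```c
/*
 * Standalone sketch of the patched 32-bit loop bounds. All address
 * constants are hypothetical examples for a 3G/1G split, NOT values
 * taken from kernel headers; only the loop header matches the patch.
 */
#include <stdio.h>

#define TASK_SIZE     0xC0000000UL	/* hypothetical 3G/1G split */
#define VMALLOC_START 0xF7800000UL	/* hypothetical vmalloc base */
#define FIXADDR_TOP   0xFFFFE000UL	/* hypothetical fixmap top */
#define PMD_SIZE      (1UL << 21)	/* 2 MiB, as on PAE */
#define PMD_MASK      (~(PMD_SIZE - 1))

int main(void)
{
	unsigned long address;
	unsigned int pmds = 0;

	/*
	 * Same bounds as the patched vmalloc_sync_all(): on a 32-bit
	 * kernel, ">= TASK_SIZE" also ends the walk if the address
	 * wraps past 4 GiB, and "< FIXADDR_TOP" stops below the fixmap.
	 */
	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE)
		pmds++;	/* the kernel syncs this pmd into every pgd here */

	printf("would visit %u pmd-sized (2 MiB) slots\n", pmds);
	return 0;
}
```

On these example numbers the walk covers the roughly 136 MiB between VMALLOC_START and FIXADDR_TOP in 2 MiB steps, instead of the whole 1 GiB pgd slots the pre-patch code stepped through.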