author    Jeremy Fitzhardinge <jeremy@goop.org>    2008-06-25 00:19:11 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-07-08 07:10:59 -0400
commit    67350a5c4514c280665cdb45439d32a008a264ba
tree      94a1b3e3c73bb1c2b68d72d39b26a83f5fdeb28e    /arch/x86/mm/fault.c
parent    15878c0b21b7b04a08108e9027ebbbd68a2502e0
x86: simplify vmalloc_sync_all
vmalloc_sync_all() is only called from register_die_notifier and alloc_vm_area. Neither is on a performance-critical path, so vmalloc_sync_all() itself is not on any hot path.

Given that the optimisations in vmalloc_sync_all add a fair amount of code and complexity, and are fairly hard to evaluate for correctness, it is better simply to remove them and keep the code simple than to worry about its absolute performance.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
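The two callers named above are what make this a cold path: register_die_notifier() syncs the vmalloc area once, at notifier-registration time, so that a notifier living in vmalloc'd module space is already mapped in every pagetable by the time the fault handler walks the die chain. A rough sketch of that call site, from kernel/notifier.c of this era (shown for orientation only; exact details may differ), is:

int register_die_notifier(struct notifier_block *nb)
{
	/* ensure vmalloc'd notifier code is mapped in all pagetables
	 * before the die chain can be walked from the fault path */
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);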
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--   arch/x86/mm/fault.c | 77
1 file changed, 26 insertions(+), 51 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 578b76819551..d0f5fce77d95 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -903,14 +903,7 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	if (SHARED_KERNEL_PMD)
@@ -918,56 +911,38 @@ void vmalloc_sync_all(void)
 
 	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
 	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			unsigned long flags;
-			struct page *page;
-
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				if (!vmalloc_sync_one(page_address(page),
-						      address))
-					break;
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
-		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
+		unsigned long flags;
+		struct page *page;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			if (!vmalloc_sync_one(page_address(page),
+					      address))
+				break;
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			const pgd_t *pgd_ref = pgd_offset_k(address);
-			unsigned long flags;
-			struct page *page;
-
-			if (pgd_none(*pgd_ref))
-				continue;
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				pgd_t *pgd;
-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-				if (pgd_none(*pgd))
-					set_pgd(pgd, *pgd_ref);
-				else
-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
-		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #endif
 }
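For reference, vmalloc_sync_all() as it reads after this patch, reassembled from the context and '+' lines of the hunks above (the unchanged line between the two hunks is filled in, and a couple of explanatory comments are added), is:

void vmalloc_sync_all(void)
{
#ifdef CONFIG_X86_32
	unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		unsigned long flags;
		struct page *page;

		/* propagate the kernel mapping for this address into every
		 * pagetable on pgd_list */
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page),
					      address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
#else /* CONFIG_X86_64 */
	unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;
		/* copy the reference kernel pgd entry into any pagetable on
		 * pgd_list that lacks it; if present, it must already match */
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
#endif
}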