Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--	arch/x86/mm/fault.c	77
1 file changed, 26 insertions(+), 51 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 578b76819551..d0f5fce77d95 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -903,14 +903,7 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	if (SHARED_KERNEL_PMD)
@@ -918,56 +911,38 @@ void vmalloc_sync_all(void)
 
 	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
 	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			unsigned long flags;
-			struct page *page;
-
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				if (!vmalloc_sync_one(page_address(page),
-						      address))
-					break;
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
-		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
+		unsigned long flags;
+		struct page *page;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			if (!vmalloc_sync_one(page_address(page),
+					      address))
+				break;
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			const pgd_t *pgd_ref = pgd_offset_k(address);
-			unsigned long flags;
-			struct page *page;
-
-			if (pgd_none(*pgd_ref))
-				continue;
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				pgd_t *pgd;
-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-				if (pgd_none(*pgd))
-					set_pgd(pgd, *pgd_ref);
-				else
-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
-		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #endif
 }
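
The structural change is the same in both hunks: the old code kept a static `insync` bitmap (plus a moving `start`) so that pgd slots already propagated to every pagetable could be skipped on later calls, and the new code simply walks every slot of the vmalloc range unconditionally. The sketch below is a minimal userspace model of that before/after pattern, not kernel code; the table size, the `sync_one()` helper, and the single-replica setup are invented for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	#define NSLOTS 64		/* stands in for PTRS_PER_PGD */

	static long reference[NSLOTS];	/* stands in for the reference pgd */
	static long replica[NSLOTS];	/* stands in for one pgd on pgd_list */

	/* Copy one slot from the reference table into the replica
	 * (roughly the role vmalloc_sync_one()/set_pgd() play per entry). */
	static void sync_one(int slot)
	{
		replica[slot] = reference[slot];
	}

	/* Old scheme: a static bitmap remembers already-synced slots so
	 * repeated calls skip them (the test_bit()/set_bit() structure). */
	static void sync_all_cached(void)
	{
		static bool insync[NSLOTS];
		int slot;

		for (slot = 0; slot < NSLOTS; slot++) {
			if (!insync[slot]) {
				sync_one(slot);
				insync[slot] = true;
			}
		}
	}

	/* New scheme after this commit: no cache, just re-sync every
	 * slot on every call. */
	static void sync_all_simple(void)
	{
		int slot;

		for (slot = 0; slot < NSLOTS; slot++)
			sync_one(slot);
	}

	int main(void)
	{
		reference[3] = 42;
		sync_all_simple();
		printf("replica[3] = %ld\n", replica[3]);	/* prints 42 */
		sync_all_cached();
		return 0;
	}

The cached variant mirrors the removed code's shape; the simple variant mirrors the new code, which trades a little repeated work for never having to reason about when a remembered bit might go stale.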
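One subtlety in the 32-bit path, which the patch keeps rather than introduces: the loop `for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE)` has no explicit upper bound. It walks to the top of the 32-bit address space and terminates when the unsigned addition wraps past zero, dropping `address` below `TASK_SIZE`. A toy model of just that bound, with made-up values standing in for `TASK_SIZE`, `PGDIR_SIZE`, and `VMALLOC_START & PGDIR_MASK`:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Illustrative values only: a 3G/1G split and 4 MiB pgd
		 * slots, modelled with uint32_t so wraparound hits at 4 GiB. */
		const uint32_t task_size = 0xC0000000u;	/* hypothetical TASK_SIZE */
		const uint32_t step      = 1u << 22;	/* hypothetical PGDIR_SIZE */
		const uint32_t start     = 0xF8000000u;	/* hypothetical vmalloc base */
		uint32_t address;
		unsigned int slots = 0;

		/* Same shape as the kernel loop: unsigned overflow is
		 * well-defined, so address wraps to a value below
		 * task_size and the loop exits. */
		for (address = start; address >= task_size; address += step)
			slots++;

		printf("visited %u pgd slots\n", slots);	/* 32 here */
		return 0;
	}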