Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c  28
 1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ad8b9733d6b3..621afb6343dc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -428,6 +428,16 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 }
 #endif
 
+static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+{
+	if ((error_code & PF_WRITE) && !pte_write(*pte))
+		return 0;
+	if ((error_code & PF_INSTR) && !pte_exec(*pte))
+		return 0;
+
+	return 1;
+}
+
 /*
  * Handle a spurious fault caused by a stale TLB entry. This allows
  * us to lazily refresh the TLB when increasing the permissions of a
@@ -457,20 +467,21 @@ static int spurious_fault(unsigned long address,
 	if (!pud_present(*pud))
 		return 0;
 
+	if (pud_large(*pud))
+		return spurious_fault_check(error_code, (pte_t *) pud);
+
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return 0;
 
+	if (pmd_large(*pmd))
+		return spurious_fault_check(error_code, (pte_t *) pmd);
+
 	pte = pte_offset_kernel(pmd, address);
 	if (!pte_present(*pte))
 		return 0;
 
-	if ((error_code & PF_WRITE) && !pte_write(*pte))
-		return 0;
-	if ((error_code & PF_INSTR) && !pte_exec(*pte))
-		return 0;
-
-	return 1;
+	return spurious_fault_check(error_code, pte);
 }
 
 /*
@@ -947,11 +958,12 @@ void vmalloc_sync_all(void)
 	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
 		if (!test_bit(pgd_index(address), insync)) {
 			const pgd_t *pgd_ref = pgd_offset_k(address);
+			unsigned long flags;
 			struct page *page;
 
 			if (pgd_none(*pgd_ref))
 				continue;
-			spin_lock(&pgd_lock);
+			spin_lock_irqsave(&pgd_lock, flags);
 			list_for_each_entry(page, &pgd_list, lru) {
 				pgd_t *pgd;
 				pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -960,7 +972,7 @@ void vmalloc_sync_all(void)
 				else
 					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 			}
-			spin_unlock(&pgd_lock);
+			spin_unlock_irqrestore(&pgd_lock, flags);
 			set_bit(pgd_index(address), insync);
 		}
 		if (address == start)
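
For illustration, the permission test that this patch factors out into spurious_fault_check() can be modeled in ordinary userspace C. The PF_* values and the mock_pte structure below are simplified stand-ins for the kernel's fault error-code bits and pte_write()/pte_exec() helpers, so this is only a sketch of the check's logic under those assumptions, not kernel code:

#include <stdio.h>

/* Simplified stand-ins for the kernel's page fault error-code bits. */
#define PF_WRITE	(1 << 1)	/* fault was a write access */
#define PF_INSTR	(1 << 4)	/* fault was an instruction fetch */

/* Mock page-table entry carrying only the permissions checked here. */
struct mock_pte {
	int writable;
	int executable;
};

/*
 * Same decision as the spurious_fault_check() helper above: the fault is
 * spurious (a stale TLB entry) only if the current page-table entry
 * already grants every permission the faulting access needed.
 */
static int spurious_fault_check(unsigned long error_code, const struct mock_pte *pte)
{
	if ((error_code & PF_WRITE) && !pte->writable)
		return 0;
	if ((error_code & PF_INSTR) && !pte->executable)
		return 0;

	return 1;
}

int main(void)
{
	struct mock_pte rw_nx = { .writable = 1, .executable = 0 };

	/* Write fault, mapping is now writable: spurious, flushing the TLB is enough. */
	printf("write fault spurious? %d\n", spurious_fault_check(PF_WRITE, &rw_nx));

	/* Instruction fetch from a non-executable mapping: a real fault. */
	printf("exec fault spurious?  %d\n", spurious_fault_check(PF_INSTR, &rw_nx));

	return 0;
}

Factoring the test into a helper is what lets the patch reuse it at the pud and pmd levels: when pud_large() or pmd_large() reports that the mapping ends there, that entry is checked directly instead of walking down to a pte.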