author     Robert Richter <robert.richter@amd.com>   2010-10-25 10:28:14 -0400
committer  Robert Richter <robert.richter@amd.com>   2010-10-25 10:29:12 -0400
commit     dbd1e66e04558a582e673bc4a9cd933ce0228d93
tree       85f3633276282cde0a3ac558d988704eaa3e68af /arch/x86/mm/fault.c
parent     328b8f1ba50b708a1b3c0acd7c41ee1b356822f6
parent     4a60cfa9457749f7987fd4f3c956dbba5a281129
Merge commit 'linux-2.6/master' (early part) into oprofile/core
This branch depends on these apic patches:

  apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
  apic, x86: Check if EILVT APIC registers are available (AMD only)

Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c | 43
1 file changed, 18 insertions(+), 25 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a24c6cfdccc4..79b0b372d2d0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -229,7 +229,16 @@ void vmalloc_sync_all(void)
 
 	spin_lock_irqsave(&pgd_lock, flags);
 	list_for_each_entry(page, &pgd_list, lru) {
-		if (!vmalloc_sync_one(page_address(page), address))
+		spinlock_t *pgt_lock;
+		pmd_t *ret;
+
+		pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+		spin_lock(pgt_lock);
+		ret = vmalloc_sync_one(page_address(page), address);
+		spin_unlock(pgt_lock);
+
+		if (!ret)
 			break;
 	}
 	spin_unlock_irqrestore(&pgd_lock, flags);
@@ -328,29 +337,7 @@ out:
 
 void vmalloc_sync_all(void)
 {
-	unsigned long address;
-
-	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-	     address += PGDIR_SIZE) {
-
-		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
-		struct page *page;
-
-		if (pgd_none(*pgd_ref))
-			continue;
-
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			if (pgd_none(*pgd))
-				set_pgd(pgd, *pgd_ref);
-			else
-				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
@@ -898,8 +885,14 @@ spurious_fault(unsigned long error_code, unsigned long address)
 	if (pmd_large(*pmd))
 		return spurious_fault_check(error_code, (pte_t *) pmd);
 
+	/*
+	 * Note: don't use pte_present() here, since it returns true
+	 * if the _PAGE_PROTNONE bit is set. However, this aliases the
+	 * _PAGE_GLOBAL bit, which for kernel pages give false positives
+	 * when CONFIG_DEBUG_PAGEALLOC is used.
+	 */
 	pte = pte_offset_kernel(pmd, address);
-	if (!pte_present(*pte))
+	if (!(pte_flags(*pte) & _PAGE_PRESENT))
 		return 0;
 
 	ret = spurious_fault_check(error_code, pte);
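
For readers unfamiliar with the flag aliasing that the new comment describes, below is a small stand-alone C sketch (userspace, not kernel code) of the effect. The bit values and the pte_present_like() helper are assumptions modelled on the x86 definitions of that era (_PAGE_PRESENT in bit 0, _PAGE_PROTNONE sharing bit 8 with _PAGE_GLOBAL, pte_present() testing both flags); they are illustrative only, not the in-tree implementation.

/*
 * Illustration only: why a pte_present()-style test can misreport a
 * kernel page that CONFIG_DEBUG_PAGEALLOC has marked not-present.
 */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_GLOBAL   (1ULL << 8)
#define _PAGE_PROTNONE _PAGE_GLOBAL	/* assumed: same bit on x86 */

/* hypothetical stand-in for the pte_present() of that era */
static int pte_present_like(uint64_t flags)
{
	return (flags & (_PAGE_PRESENT | _PAGE_PROTNONE)) != 0;
}

int main(void)
{
	/* a kernel global mapping whose Present bit has been cleared */
	uint64_t flags = _PAGE_GLOBAL;

	printf("pte_present()-style check: %d  (false positive)\n",
	       pte_present_like(flags));
	printf("_PAGE_PRESENT check:       %d\n",
	       (int)((flags & _PAGE_PRESENT) != 0));
	return 0;
}

Because the GLOBAL bit alone makes the combined test succeed, the hunk above tests pte_flags(*pte) & _PAGE_PRESENT directly.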