author		Jan Beulich <jbeulich@novell.com>	2006-03-25 10:29:40 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-25 12:10:53 -0500
commit		8c914cb704a11460eec7ed2a572bb5e9bd513d24 (patch)
tree		3d735f0e33f474b296f106dee70935d77e267a74 /include
parent		85f9eebccde51e24896f31383f5b70776362e1a6 (diff)
[PATCH] x86_64: actively synchronize vmalloc area when registering certain callbacks
While the modular aspect of the respective i386 patch doesn't apply to x86-64 (as the top level page directory entry is shared between modules and the base kernel), handlers registered with register_die_notifier() are still under similar constraints for touching ioremap()ed or vmalloc()ed memory. The likelihood of this problem becoming visible is of course significantly lower, as the assigned virtual addresses would have to cross a 2**39 byte boundary. This is because the callback gets invoked:

(a) in the page fault path before the top level page table propagation gets carried out (hence a fault to propagate the top level page table entry/entries mapping to a module's code/data would nest infinitely), and

(b) in the NMI path, where nested faults must absolutely not happen, since otherwise the IRET from the nested fault re-enables NMIs, potentially resulting in nested NMI occurrences.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
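[ Note: the change that actually invokes vmalloc_sync_all() lives in arch/x86_64/kernel/traps.c and arch/x86_64/mm/fault.c, which fall outside this include/-limited view. A rough sketch of the registration-side change, reconstructed from the description above with error handling and locking omitted:

int register_die_notifier(struct notifier_block *nb)
{
	/* Propagate all top level page table entries covering the
	 * vmalloc area into every pgd *before* the handler can be
	 * invoked from the page fault or NMI path. */
	vmalloc_sync_all();
	return notifier_chain_register(&die_chain, nb);
}
]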
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86_64/pgalloc.h	28
-rw-r--r--	include/asm-x86_64/pgtable.h	 4
2 files changed, 32 insertions, 0 deletions
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 08cad2482bcb..43d4c333a8b1 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -45,12 +45,39 @@ static inline void pud_free (pud_t *pud)
 	free_page((unsigned long)pud);
 }
 
+static inline void pgd_list_add(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	page->index = (pgoff_t)pgd_list;
+	if (pgd_list)
+		pgd_list->private = (unsigned long)&page->index;
+	pgd_list = page;
+	page->private = (unsigned long)&pgd_list;
+	spin_unlock(&pgd_lock);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+	struct page *next, **pprev, *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	next = (struct page *)page->index;
+	pprev = (struct page **)page->private;
+	*pprev = next;
+	if (next)
+		next->private = (unsigned long)pprev;
+	spin_unlock(&pgd_lock);
+}
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!pgd)
 		return NULL;
+	pgd_list_add(pgd);
 	/*
 	 * Copy kernel pointers in from init.
 	 * Could keep a freelist or slab cache of those because the kernel
@@ -67,6 +94,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 static inline void pgd_free(pgd_t *pgd)
 {
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+	pgd_list_del(pgd);
 	free_page((unsigned long)pgd);
 }
 
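[ Note: the consumer of this list is vmalloc_sync_all(), defined in arch/x86_64/mm/fault.c and therefore not part of this include/-limited diff. A sketch of the walk it performs, reconstructed from the linkage pgd_list_add() maintains (page->index serving as the "next" pointer), assuming the usual VMALLOC_START/VMALLOC_END/PGDIR_SIZE constants from the x86-64 headers:

void vmalloc_sync_all(void)
{
	unsigned long address, start = VMALLOC_START & PGDIR_MASK;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;
		spin_lock(&pgd_lock);
		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(address);

			/* Copy the reference entry into any pgd that
			 * still lacks it. */
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
		}
		spin_unlock(&pgd_lock);
	}
}
]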
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index def903287193..31e83c3bd022 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -420,6 +420,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+void vmalloc_sync_all(void);
+
 #endif /* !__ASSEMBLY__ */
 
 extern int kern_addr_valid(unsigned long addr);
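[ Usage sketch: a hypothetical notifier whose handler touches vmalloc()ed memory, possibly from NMI context. All names here (my_die_handler, log_area, my_die_nb) are illustrative and not part of this patch. Because register_die_notifier() now synchronizes the vmalloc area up front, the handler can dereference log_area without risking a nested fault:

static char *log_area;	/* allocated with vmalloc() */

static int my_die_handler(struct notifier_block *self,
			  unsigned long val, void *data)
{
	log_area[0] = 'D';	/* safe: pgd entry already propagated */
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

static int __init my_init(void)
{
	log_area = vmalloc(PAGE_SIZE);
	if (!log_area)
		return -ENOMEM;
	return register_die_notifier(&my_die_nb);
}
]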