author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2010-09-21 15:01:51 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2010-10-19 16:57:08 -0400
commit		617d34d9e5d8326ec8f188c616aa06ac59d083fe
tree		763d02b7713bad65ba819a8334bb0e95d4370352 /arch/x86/mm/pgtable.c
parent		44235dcde416104b8e1db7606c283f4c0149c760
x86, mm: Hold mm->page_table_lock while doing vmalloc_sync
Take mm->page_table_lock while syncing the vmalloc region. This prevents a race with the Xen pagetable pin/unpin code, which expects that the page_table_lock is already held. If this race occurs, then Xen can see an inconsistent page type (a page can either be read/write or a pagetable page, and pin/unpin converts it between them), which will cause either the pin or the set_p[gm]d to fail; either will crash the kernel.

vmalloc_sync_all() should be called rarely, so this extra use of page_table_lock should not interfere with its normal users.

The mm pointer is stashed in the pgd page's index field, as that won't be otherwise used for pgds.

Reported-by: Ian Campbell <ian.cambell@eu.citrix.com>
Originally-by: Jan Beulich <jbeulich@novell.com>
LKML-Reference: <4CB88A4C.1080305@goop.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/pgtable.c')
-rw-r--r--	arch/x86/mm/pgtable.c	20
1 file changed, 17 insertions, 3 deletions
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 5c4ee422590e..c70e57dbb491 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -87,7 +87,19 @@ static inline void pgd_list_del(pgd_t *pgd)
 #define UNSHARED_PTRS_PER_PGD				\
 	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
-static void pgd_ctor(pgd_t *pgd)
+
+static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+	virt_to_page(pgd)->index = (pgoff_t)mm;
+}
+
+struct mm_struct *pgd_page_get_mm(struct page *page)
+{
+	return (struct mm_struct *)page->index;
+}
+
+static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
 {
 	/* If the pgd points to a shared pagetable level (either the
 	   ptes in non-PAE, or shared PMD in PAE), then just copy the
@@ -105,8 +117,10 @@ static void pgd_ctor(pgd_t *pgd)
 	}
 
 	/* list required to sync kernel mapping updates */
-	if (!SHARED_KERNEL_PMD)
+	if (!SHARED_KERNEL_PMD) {
+		pgd_set_mm(pgd, mm);
 		pgd_list_add(pgd);
+	}
 }
 
 static void pgd_dtor(pgd_t *pgd)
@@ -272,7 +286,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	 */
 	spin_lock_irqsave(&pgd_lock, flags);
 
-	pgd_ctor(pgd);
+	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
 
 	spin_unlock_irqrestore(&pgd_lock, flags);
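The consumer of pgd_page_get_mm() lives in arch/x86/mm/fault.c, which the diffstat above excludes: in the same commit, vmalloc_sync_all() walks pgd_list and recovers each pgd's owning mm so it can take that mm's page_table_lock around the sync. A sketch of that loop, reconstructed from the commit (an approximation of its shape, not the verbatim hunk):

	list_for_each_entry(page, &pgd_list, lru) {
		spinlock_t *pgt_lock;
		pmd_t *ret;

		/* pgd_lock, held by the caller, keeps pgd_list stable;
		 * the per-mm page_table_lock is what Xen's pin/unpin
		 * code expects to be held while the pgd is updated. */
		pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

		spin_lock(pgt_lock);
		ret = vmalloc_sync_one(page_address(page), address);
		spin_unlock(pgt_lock);

		if (!ret)
			break;
	}

Since vmalloc_sync_all() runs rarely, taking page_table_lock here adds no meaningful contention for its normal users, which is the trade-off the commit message calls out.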