author | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2010-09-21 15:01:51 -0400
---|---|---
committer | H. Peter Anvin <hpa@linux.intel.com> | 2010-10-19 16:57:08 -0400
commit | 617d34d9e5d8326ec8f188c616aa06ac59d083fe (patch) |
tree | 763d02b7713bad65ba819a8334bb0e95d4370352 /arch/x86/mm/init_64.c |
parent | 44235dcde416104b8e1db7606c283f4c0149c760 (diff) |
x86, mm: Hold mm->page_table_lock while doing vmalloc_sync
Take mm->page_table_lock while syncing the vmalloc region. This prevents
a race with the Xen pagetable pin/unpin code, which expects that the
page_table_lock is already held. If this race occurs, Xen can observe
an inconsistent page type (a page is either an ordinary read/write page
or a pagetable page, and pin/unpin converts between the two), which will
cause either the pin or the set_p[gm]d to fail; either failure will crash the kernel.
vmalloc_sync_all() should be called rarely, so this extra use of
page_table_lock should not interfere with its normal users.
The mm pointer is stashed in the pgd page's index field, since that
field is not otherwise used for pgd pages.
Reported-by: Ian Campbell <ian.cambell@eu.citrix.com>
Originally-by: Jan Beulich <jbeulich@novell.com>
LKML-Reference: <4CB88A4C.1080305@goop.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
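The pgd_page_get_mm() call in the hunk below pairs with a store-side helper that stashes the mm pointer in page->index when the pgd is set up. That helper is not visible in this diffstat (it lives outside init_64.c), so the following is only a sketch based on the changelog; pgd_set_mm() is an assumed name for the store side, and only pgd_page_get_mm() actually appears in the hunk shown.

```c
#include <linux/mm.h>

/*
 * Sketch of the index-field stashing described in the changelog.
 * pgd_set_mm() is a hypothetical counterpart to pgd_page_get_mm();
 * the real store-side helper is defined outside this file.
 */
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	/* index is unused for pgd pages, so it can carry the mm pointer */
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
```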
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r-- | arch/x86/mm/init_64.c | 7
1 file changed, 7 insertions, 0 deletions
```diff
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1ad7c0ff5d2b..4d323fb770c2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -116,12 +116,19 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 	spin_lock_irqsave(&pgd_lock, flags);
 	list_for_each_entry(page, &pgd_list, lru) {
 		pgd_t *pgd;
+		spinlock_t *pgt_lock;
+
 		pgd = (pgd_t *)page_address(page) + pgd_index(address);
+		pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+		spin_lock(pgt_lock);
+
 		if (pgd_none(*pgd))
 			set_pgd(pgd, *pgd_ref);
 		else
 			BUG_ON(pgd_page_vaddr(*pgd)
 			       != pgd_page_vaddr(*pgd_ref));
+
+		spin_unlock(pgt_lock);
 	}
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
```
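Putting the hunk in context, sync_global_pgds() with this patch applied would read roughly as follows. The lines outside the hunk (the outer loop over PGDIR_SIZE steps) are reconstructed from the parent commit's version of the function, not from this page, so treat them as an assumption.

```c
/*
 * Sketch of sync_global_pgds() after this patch; lines outside the
 * hunk above are reconstructed from the surrounding function of this
 * kernel era and may differ in detail from the actual tree.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		/* pgd_lock (IRQ-safe) guards the pgd_list itself... */
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/*
			 * ...while each mm's page_table_lock is what the Xen
			 * pin/unpin code expects to be held when a pgd is
			 * modified, closing the race from the changelog.
			 */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			spin_unlock(pgt_lock);
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
```

Note the lock ordering: pgd_lock is always taken outside page_table_lock, and the inner lock can be a plain spin_lock() because spin_lock_irqsave() on pgd_lock has already disabled interrupts at that point.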