-rw-r--r--  arch/x86/include/asm/pgtable_64.h |  2
-rw-r--r--  arch/x86/mm/fault.c               | 24
-rw-r--r--  arch/x86/mm/init_64.c             | 30
3 files changed, 33 insertions, 23 deletions
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 076052cd62be..f96ac9bedf75 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
         native_set_pgd(pgd, native_make_pgd(0));
 }
 
+extern void sync_global_pgds(unsigned long start, unsigned long end);
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
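For readers skimming the interface change above: sync_global_pgds() takes an inclusive [start, end] virtual-address range and walks it one PGDIR_SIZE-sized top-level slot at a time, with callers rounding start down via PGDIR_MASK (as the fault.c hunk below does for the vmalloc area). The stand-alone user-space sketch below only illustrates that bounds arithmetic; the paging constants and vmalloc range are assumed typical x86-64 4-level-paging values of this era and are not taken from the patch.

#include <stdio.h>

/* Assumed x86-64 4-level paging constants; not taken from this diff. */
#define PGDIR_SHIFT     39
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE - 1))

/* Assumed x86-64 vmalloc range of this era; also not from the diff. */
#define VMALLOC_START   0xffffc90000000000UL
#define VMALLOC_END     0xffffe8ffffffffffUL

int main(void)
{
        unsigned long start = VMALLOC_START & PGDIR_MASK; /* round down to a PGD boundary */
        unsigned long address, slots = 0;

        /* Mirror the inclusive "address <= end" walk in sync_global_pgds(). */
        for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE)
                slots++;

        printf("start %#lx, %lu PGD slots covered\n", start, slots);
        return 0;
}

With the assumed constants this prints 64 slots, i.e. a 32 TB vmalloc area spanning 64 top-level entries of 512 GB each.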
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4c4508e8a204..51f7ee71d6c7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -326,29 +326,7 @@ out:
 
 void vmalloc_sync_all(void)
 {
-        unsigned long address;
-
-        for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-             address += PGDIR_SIZE) {
-
-                const pgd_t *pgd_ref = pgd_offset_k(address);
-                unsigned long flags;
-                struct page *page;
-
-                if (pgd_none(*pgd_ref))
-                        continue;
-
-                spin_lock_irqsave(&pgd_lock, flags);
-                list_for_each_entry(page, &pgd_list, lru) {
-                        pgd_t *pgd;
-                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                        if (pgd_none(*pgd))
-                                set_pgd(pgd, *pgd_ref);
-                        else
-                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-                }
-                spin_unlock_irqrestore(&pgd_lock, flags);
-        }
+        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
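Background for why a one-line wrapper is enough here: on x86-64 each process carries its own copy of the kernel half of the PGD, so a mapping created in init_mm can be missing from a task's PGD until either the page-fault path lazily copies the missing top-level entry, or vmalloc_sync_all()/sync_global_pgds() pushes it into every PGD eagerly. Below is a toy user-space model of the lazy fix-up half, written in the spirit of the x86 vmalloc fault path; the types and names are invented for illustration and are not kernel code.

#include <assert.h>
#include <stdio.h>

/* Toy model (assumptions only): a "PGD" is a small array of entries,
 * and an entry value of 0 means "not present". */
#define PGD_ENTRIES 8

typedef unsigned long pgd_entry_t;

static pgd_entry_t init_pgd[PGD_ENTRIES];       /* reference kernel PGD */
static pgd_entry_t task_pgd[PGD_ENTRIES];       /* one process's PGD copy */

/* Lazy fix-up: when a process touches a kernel address whose top-level
 * entry is missing from its own PGD, copy that single entry from the
 * reference PGD; fail if the mapping does not exist anywhere. */
static int toy_vmalloc_fault(int index)
{
        if (init_pgd[index] == 0)
                return -1;                      /* genuinely bad access */
        if (task_pgd[index] == 0)
                task_pgd[index] = init_pgd[index];
        return 0;
}

int main(void)
{
        init_pgd[3] = 0xabc;            /* kernel creates a new vmalloc mapping */

        assert(task_pgd[3] == 0);       /* the process has not seen it yet */
        if (toy_vmalloc_fault(3) != 0)  /* first touch copies the entry over */
                return 1;
        assert(task_pgd[3] == 0xabc);

        if (toy_vmalloc_fault(5) != -1) /* no such mapping anywhere */
                return 1;
        printf("lazy sync model: OK\n");
        return 0;
}

The eager counterpart, which this patch factors out into sync_global_pgds(), is sketched after the init_64.c hunk below.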
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9a6674689a20..61a1b4fdecbf 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -98,6 +98,36 @@ static int __init nonx32_setup(char *str)
 __setup("noexec32=", nonx32_setup);
 
 /*
+ * When memory is added or removed, make sure that all processes' MMs
+ * have suitable PGD entries in their local PGD-level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+        unsigned long address;
+
+        for (address = start; address <= end; address += PGDIR_SIZE) {
+                const pgd_t *pgd_ref = pgd_offset_k(address);
+                unsigned long flags;
+                struct page *page;
+
+                if (pgd_none(*pgd_ref))
+                        continue;
+
+                spin_lock_irqsave(&pgd_lock, flags);
+                list_for_each_entry(page, &pgd_list, lru) {
+                        pgd_t *pgd;
+                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                        if (pgd_none(*pgd))
+                                set_pgd(pgd, *pgd_ref);
+                        else
+                                BUG_ON(pgd_page_vaddr(*pgd)
+                                       != pgd_page_vaddr(*pgd_ref));
+                }
+                spin_unlock_irqrestore(&pgd_lock, flags);
+        }
+}
+
+/*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
  */
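And the eager half that this patch factors out: the toy model below mirrors the sync_global_pgds() loop above, copying every populated reference entry into each registered per-process PGD and treating a disagreement in an already-populated slot as fatal, much like the BUG_ON(). Array indices stand in for PGDIR_SIZE-aligned addresses, and the pgd_list plus pgd_lock are replaced by a fixed array with no locking, so this is a sketch of the idea rather than the kernel implementation.

#include <assert.h>
#include <stdio.h>

/* Toy model of sync_global_pgds() (assumptions only, not kernel code). */
#define PGD_ENTRIES     8
#define NR_TASKS        3

typedef unsigned long pgd_entry_t;

static pgd_entry_t init_pgd[PGD_ENTRIES];               /* reference PGD */
static pgd_entry_t task_pgds[NR_TASKS][PGD_ENTRIES];    /* per-process PGDs */

/* Copy every populated reference entry in [start, end] (inclusive) into
 * each per-process PGD; insist that already-populated entries agree. */
static void toy_sync_global_pgds(int start, int end)
{
        int index, task;

        for (index = start; index <= end; index++) {
                if (init_pgd[index] == 0)
                        continue;
                for (task = 0; task < NR_TASKS; task++) {
                        if (task_pgds[task][index] == 0)
                                task_pgds[task][index] = init_pgd[index];
                        else
                                assert(task_pgds[task][index] ==
                                       init_pgd[index]);        /* ~BUG_ON */
                }
        }
}

int main(void)
{
        init_pgd[2] = 0x111;    /* e.g. hot-added memory mapped here */
        init_pgd[6] = 0x222;

        toy_sync_global_pgds(0, PGD_ENTRIES - 1);

        assert(task_pgds[0][2] == 0x111 && task_pgds[2][6] == 0x222);
        printf("eager sync model: OK\n");
        return 0;
}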