| author | Haicheng Li <haicheng.li@linux.intel.com> | 2010-05-19 05:42:14 -0400 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2010-08-26 17:02:29 -0400 |
| commit | 6afb5157b9eba4092e2f0f54d24a3806409bdde5 | |
| tree | 5d213ef0366441af288b3a293557df09cb45cdca | |
| parent | 61c77326d1df079f202fa79403c3ccd8c5966a81 | |
x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions
No behavior change.
Move some of the vmalloc_sync_all() code into a new function,
sync_global_pgds(), which will be useful for memory hotplug.
Signed-off-by: Haicheng Li <haicheng.li@linux.intel.com>
LKML-Reference: <4C6E4ECD.1090607@linux.intel.com>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/fault.c')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | arch/x86/mm/fault.c | 24 |

1 file changed, 1 insertion(+), 23 deletions(-)
```diff
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4c4508e8a204..51f7ee71d6c7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -326,29 +326,7 @@ out:
 
 void vmalloc_sync_all(void)
 {
-	unsigned long address;
-
-	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-	     address += PGDIR_SIZE) {
-
-		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
-		struct page *page;
-
-		if (pgd_none(*pgd_ref))
-			continue;
-
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			if (pgd_none(*pgd))
-				set_pgd(pgd, *pgd_ref);
-			else
-				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
```
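The new sync_global_pgds() helper itself is added in arch/x86/mm/init_64.c and so does not appear in this diffstat, which is limited to fault.c. As a rough sketch of the other half of the change, assuming the helper simply takes the synced address range as start/end parameters (the parameter names and the exact placement in init_64.c are assumptions here, not shown by this diff), it is essentially the loop removed above:

```c
/*
 * Sketch of the factored-out helper (assumed to live in
 * arch/x86/mm/init_64.c, built against the usual kernel headers):
 * the body of the old vmalloc_sync_all() loop, with the address
 * range supplied by the caller so other code such as memory
 * hotplug can reuse it.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		/* Nothing mapped at this top-level entry in the reference
		 * (kernel) page table, so there is nothing to propagate. */
		if (pgd_none(*pgd_ref))
			continue;

		/* Copy the kernel PGD entry into every process's PGD,
		 * or verify that an existing entry already matches. */
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
```

With that split in place, vmalloc_sync_all() reduces to the one-line call shown in the diff, and later memory-hotplug code can call sync_global_pgds() directly for newly added ranges.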
