aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/init_64.c
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2010-10-25 10:28:14 -0400
committerRobert Richter <robert.richter@amd.com>2010-10-25 10:29:12 -0400
commitdbd1e66e04558a582e673bc4a9cd933ce0228d93 (patch)
tree85f3633276282cde0a3ac558d988704eaa3e68af /arch/x86/mm/init_64.c
parent328b8f1ba50b708a1b3c0acd7c41ee1b356822f6 (diff)
parent4a60cfa9457749f7987fd4f3c956dbba5a281129 (diff)
Merge commit 'linux-2.6/master' (early part) into oprofile/core
This branch depends on these apic patches:
  apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
  apic, x86: Check if EILVT APIC registers are available (AMD only)
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--arch/x86/mm/init_64.c49
1 file changed, 47 insertions, 2 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9a6674689a20..c55f900fbf89 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -98,6 +98,43 @@ static int __init nonx32_setup(char *str)
98__setup("noexec32=", nonx32_setup); 98__setup("noexec32=", nonx32_setup);
99 99
100/* 100/*
101 * When memory was added/removed make sure all the processes MM have
102 * suitable PGD entries in the local PGD level page.
103 */
104void sync_global_pgds(unsigned long start, unsigned long end)
105{
106	unsigned long address;
107
	/*
	 * Walk the range one PGD entry (PGDIR_SIZE) at a time; the
	 * reference entry for each address is the kernel's own page
	 * table (init_mm), via pgd_offset_k().
	 */
108	for (address = start; address <= end; address += PGDIR_SIZE) {
109		const pgd_t *pgd_ref = pgd_offset_k(address);
110		unsigned long flags;
111		struct page *page;
112
		/* No kernel mapping at this slot — nothing to propagate. */
113		if (pgd_none(*pgd_ref))
114			continue;
115
		/* pgd_lock guards pgd_list, the list of per-mm PGD pages. */
116		spin_lock_irqsave(&pgd_lock, flags);
117		list_for_each_entry(page, &pgd_list, lru) {
118			pgd_t *pgd;
119			spinlock_t *pgt_lock;
120
121			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/*
			 * Lock ordering: pgd_lock (irqs off) first, then the
			 * owning mm's page_table_lock for this PGD page.
			 */
122			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
123			spin_lock(pgt_lock);
124
			/*
			 * Install the kernel entry if the slot is empty; if an
			 * entry already exists it must reference the same
			 * lower-level table, otherwise the page tables have
			 * diverged and we BUG.
			 */
125			if (pgd_none(*pgd))
126				set_pgd(pgd, *pgd_ref);
127			else
128				BUG_ON(pgd_page_vaddr(*pgd)
129				       != pgd_page_vaddr(*pgd_ref));
130
131			spin_unlock(pgt_lock);
132		}
133		spin_unlock_irqrestore(&pgd_lock, flags);
134	}
135}
136
137/*
101 * NOTE: This function is marked __ref because it calls __init function 138 * NOTE: This function is marked __ref because it calls __init function
102 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. 139 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
103 */ 140 */
@@ -293,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
293 panic("alloc_low_page: ran out of memory"); 330 panic("alloc_low_page: ran out of memory");
294 331
295 adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); 332 adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
296 memset(adr, 0, PAGE_SIZE); 333 clear_page(adr);
297 *phys = pfn * PAGE_SIZE; 334 *phys = pfn * PAGE_SIZE;
298 return adr; 335 return adr;
299} 336}
@@ -534,11 +571,13 @@ kernel_physical_mapping_init(unsigned long start,
534 unsigned long end, 571 unsigned long end,
535 unsigned long page_size_mask) 572 unsigned long page_size_mask)
536{ 573{
537 574 bool pgd_changed = false;
538 unsigned long next, last_map_addr = end; 575 unsigned long next, last_map_addr = end;
576 unsigned long addr;
539 577
540 start = (unsigned long)__va(start); 578 start = (unsigned long)__va(start);
541 end = (unsigned long)__va(end); 579 end = (unsigned long)__va(end);
580 addr = start;
542 581
543 for (; start < end; start = next) { 582 for (; start < end; start = next) {
544 pgd_t *pgd = pgd_offset_k(start); 583 pgd_t *pgd = pgd_offset_k(start);
@@ -563,7 +602,12 @@ kernel_physical_mapping_init(unsigned long start,
563 spin_lock(&init_mm.page_table_lock); 602 spin_lock(&init_mm.page_table_lock);
564 pgd_populate(&init_mm, pgd, __va(pud_phys)); 603 pgd_populate(&init_mm, pgd, __va(pud_phys));
565 spin_unlock(&init_mm.page_table_lock); 604 spin_unlock(&init_mm.page_table_lock);
605 pgd_changed = true;
566 } 606 }
607
608 if (pgd_changed)
609 sync_global_pgds(addr, end);
610
567 __flush_tlb_all(); 611 __flush_tlb_all();
568 612
569 return last_map_addr; 613 return last_map_addr;
@@ -1003,6 +1047,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
1003 } 1047 }
1004 1048
1005 } 1049 }
1050 sync_global_pgds((unsigned long)start_page, end);
1006 return 0; 1051 return 0;
1007} 1052}
1008 1053