path: root/arch/x86
author    Linus Torvalds <torvalds@linux-foundation.org>    2012-12-15 15:29:54 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-12-15 15:29:54 -0500
commit    be354f40812314dee2b1e3aa272528c056bb827d (patch)
tree      3261b6858619fddf2779499e3f0591a5677e6313 /arch/x86
parent    7a280cf512053137a37da2801eac73a8842fa50d (diff)
Revert "x86, mm: Include the entire kernel memory map in trampoline_pgd"
This reverts commit 53b87cf088e2ea68d7c59619d0214cc15bb76133.

It causes odd bootup problems on x86-64. Markus Trippelsdorf gets a
repeatable oops, and I see a non-repeatable oops (or constant stream of
messages that scroll off too quickly to read) that seems to go away
with this commit reverted.

So we don't know exactly what is wrong with the commit, but it's
definitely problematic, and worth reverting sooner rather than later.

Bisected-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: H Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/init_64.c    |   9 +-
-rw-r--r--  arch/x86/mm/ioremap.c    | 105 +----------
-rw-r--r--  arch/x86/realmode/init.c |  17 +---
3 files changed, 3 insertions(+), 128 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 07519a120449..2ead3c8a4c84 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		struct page *page;
-		pgd_t *pgd;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -130,13 +130,6 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 			spin_unlock(pgt_lock);
 		}
-
-		pgd = __va(real_mode_header->trampoline_pgd);
-		pgd += pgd_index(address);
-
-		if (pgd_none(*pgd))
-			set_pgd(pgd, *pgd_ref);
-
 		spin_unlock(&pgd_lock);
 	}
 }
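For orientation: sync_global_pgds() propagates kernel-address-space PGD
entries from the reference page tables into every PGD on pgd_list, and the
lines removed above had extended that propagation to the trampoline page
table as well (which is also why the pgd declaration can move back into the
loop body). A rough user-space model of the propagation pattern; the toy_mm
type, the array, and the flat table layout are illustrative stand-ins, not
kernel API:

#include <stddef.h>

#define PTRS_PER_PGD 512

/* Stand-in for one per-process page-global directory. */
struct toy_mm {
	unsigned long pgd[PTRS_PER_PGD];
};

/*
 * Model of the sync step: if the reference table has an entry at this
 * index, copy it into every table that still lacks one. Locking
 * (pgd_lock, pgt_lock) is omitted.
 */
static void toy_sync_one_index(struct toy_mm *mms, size_t nr_mms,
			       const unsigned long *pgd_ref, size_t idx)
{
	size_t i;

	if (pgd_ref[idx] == 0)			/* pgd_none(*pgd_ref) */
		return;

	for (i = 0; i < nr_mms; i++)		/* list_for_each_entry() */
		if (mms[i].pgd[idx] == 0)	/* pgd_none(*pgd) */
			mms[i].pgd[idx] = pgd_ref[idx];	/* set_pgd() */
}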
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e190f7b56653..78fe3f1ac49f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -50,107 +50,6 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 	return err;
 }
 
-#ifdef CONFIG_X86_64
-static void ident_pte_range(unsigned long paddr, unsigned long vaddr,
-			    pmd_t *ppmd, pmd_t *vpmd, unsigned long end)
-{
-	pte_t *ppte = pte_offset_kernel(ppmd, paddr);
-	pte_t *vpte = pte_offset_kernel(vpmd, vaddr);
-
-	do {
-		set_pte(ppte, *vpte);
-	} while (ppte++, vpte++, vaddr += PAGE_SIZE, vaddr != end);
-}
-
-static int ident_pmd_range(unsigned long paddr, unsigned long vaddr,
-			   pud_t *ppud, pud_t *vpud, unsigned long end)
-{
-	pmd_t *ppmd = pmd_offset(ppud, paddr);
-	pmd_t *vpmd = pmd_offset(vpud, vaddr);
-	unsigned long next;
-
-	do {
-		next = pmd_addr_end(vaddr, end);
-
-		if (!pmd_present(*ppmd)) {
-			pte_t *ppte = (pte_t *)get_zeroed_page(GFP_KERNEL);
-			if (!ppte)
-				return 1;
-
-			set_pmd(ppmd, __pmd(_KERNPG_TABLE | __pa(ppte)));
-		}
-
-		ident_pte_range(paddr, vaddr, ppmd, vpmd, next);
-	} while (ppmd++, vpmd++, vaddr = next, vaddr != end);
-
-	return 0;
-}
-
-static int ident_pud_range(unsigned long paddr, unsigned long vaddr,
-			   pgd_t *ppgd, pgd_t *vpgd, unsigned long end)
-{
-	pud_t *ppud = pud_offset(ppgd, paddr);
-	pud_t *vpud = pud_offset(vpgd, vaddr);
-	unsigned long next;
-
-	do {
-		next = pud_addr_end(vaddr, end);
-
-		if (!pud_present(*ppud)) {
-			pmd_t *ppmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
-			if (!ppmd)
-				return 1;
-
-			set_pud(ppud, __pud(_KERNPG_TABLE | __pa(ppmd)));
-		}
-
-		if (ident_pmd_range(paddr, vaddr, ppud, vpud, next))
-			return 1;
-	} while (ppud++, vpud++, vaddr = next, vaddr != end);
-
-	return 0;
-}
-
-static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr,
-				   unsigned long size)
-{
-	unsigned long end = vaddr + size;
-	unsigned long next;
-	pgd_t *vpgd, *ppgd;
-
-	/* Don't map over the guard hole. */
-	if (paddr >= 0x800000000000 || paddr + size > 0x800000000000)
-		return 1;
-
-	ppgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr);
-
-	vpgd = pgd_offset_k(vaddr);
-	do {
-		next = pgd_addr_end(vaddr, end);
-
-		if (!pgd_present(*ppgd)) {
-			pud_t *ppud = (pud_t *)get_zeroed_page(GFP_KERNEL);
-			if (!ppud)
-				return 1;
-
-			set_pgd(ppgd, __pgd(_KERNPG_TABLE | __pa(ppud)));
-		}
-
-		if (ident_pud_range(paddr, vaddr, ppgd, vpgd, next))
-			return 1;
-	} while (ppgd++, vpgd++, vaddr = next, vaddr != end);
-
-	return 0;
-}
-#else
-static inline int insert_identity_mapping(resource_size_t paddr,
-					  unsigned long vaddr,
-					  unsigned long size)
-{
-	return 0;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
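The removed ident_pmd_range()/ident_pud_range() walkers above all follow the
same idiom: advance in chunks clamped by the matching <level>_addr_end()
helper, so each loop iteration stays within one entry of the level above. A
minimal user-space sketch of that clamping at the PGD level; the constants
assume 4-level paging and the sample range is hypothetical:

#include <stdio.h>

#define PGDIR_SHIFT 39			/* 4-level paging: 512 GiB per PGD slot */
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE - 1))

/* Same shape as the kernel's pgd_addr_end(): next boundary, clamped to end. */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x10000000000UL;	/* hypothetical range: 1 TiB ... */
	unsigned long end  = 0x18080000000UL;	/* ... to just past 1.5 TiB */

	do {
		unsigned long next = pgd_addr_end(addr, end);

		/* prints one full 512 GiB chunk, then a short final chunk */
		printf("chunk 0x%lx - 0x%lx\n", addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}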
@@ -264,10 +163,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	ret_addr = (void __iomem *) (vaddr + offset);
 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
-	if (insert_identity_mapping(phys_addr, vaddr, size))
-		printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n",
-					(unsigned long long)phys_addr);
-
 	/*
 	 * Check if the request spans more than any BAR in the iomem resource
 	 * tree.
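The warning removed here fired whenever insert_identity_mapping() failed,
including the guard-hole case in the hunk above: 0x800000000000 is 1 << 47,
the first address beyond the lower canonical half under 4-level paging. A
quick user-space check of that constant; the helper name is illustrative,
not kernel API:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative helper: is addr below the 4-level-paging guard hole? */
static bool below_guard_hole(unsigned long long addr)
{
	return addr < (1ULL << 47);
}

int main(void)
{
	printf("guard hole starts at 0x%llx\n", 1ULL << 47); /* 0x800000000000 */
	printf("%d\n", below_guard_hole(0x7fffffffffffULL)); /* 1: last mappable */
	printf("%d\n", below_guard_hole(0x800000000000ULL)); /* 0: rejected */
	return 0;
}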
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 8e6ab6137852..cbca565af5bd 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -78,21 +78,8 @@ void __init setup_real_mode(void)
 	*trampoline_cr4_features = read_cr4();
 
 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-
-	/*
-	 * Create an identity mapping for all of physical memory.
-	 */
-	for (i = 0; i <= pgd_index(max_pfn << PAGE_SHIFT); i++) {
-		int index = pgd_index(PAGE_OFFSET) + i;
-
-		trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[index]);
-	}
-
-	/*
-	 * Copy the upper-half of the kernel pages tables.
-	 */
-	for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++)
-		trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[i]);
+	trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
+	trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
 #endif
 }
 
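The restored two-line setup suffices because each PGD slot covers 512 GiB of
virtual address space: the trampoline only needs slot 0 (the low identity
mapping via level3_ident_pgt) and slot 511 (the kernel mapping via
level3_kernel_pgt). A quick user-space check of the slot arithmetic,
assuming the 3.7-era x86-64 layout (PAGE_OFFSET = 0xffff880000000000,
__START_KERNEL_map = 0xffffffff80000000):

#include <stdio.h>

#define PGDIR_SHIFT  39
#define PTRS_PER_PGD 512
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	printf("%lu\n", pgd_index(0x0UL));		  /* 0: identity map slot */
	printf("%lu\n", pgd_index(0xffff880000000000UL)); /* 272: direct map, the
							     removed loop's base */
	printf("%lu\n", pgd_index(0xffffffff80000000UL)); /* 511: kernel text map */
	return 0;
}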