author		Matt Fleming <matt.fleming@intel.com>	2012-09-07 13:23:51 -0400
committer	Matt Fleming <matt.fleming@intel.com>	2012-10-30 06:39:19 -0400
commit		53b87cf088e2ea68d7c59619d0214cc15bb76133
tree		afa920fe2dc6e11dc023147fc6bffafcf19a2508 /arch/x86/mm
parent		e913ca7d16d70b75367ff56a3b201980501d542c
x86, mm: Include the entire kernel memory map in trampoline_pgd
There are various pieces of code in arch/x86 that require a page table
with an identity mapping. Make trampoline_pgd a proper kernel page
table; currently it includes only the kernel text and module space
mappings.
One new feature of trampoline_pgd is that it now contains mappings for
the physical I/O device addresses, which are inserted at ioremap()
time. Some broken EFI firmware implementations require these mappings
to always be present.
Acked-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
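
To see why these mappings need to stick around, here is a minimal,
illustrative sketch (not part of this patch; the helper names are made
up) of code that temporarily runs on trampoline_pgd, e.g. around a
firmware call. With this patch applied, both the kernel memory map and
anything previously ioremap()'d stay reachable while CR3 points at it:

	#include <asm/realmode.h>	/* real_mode_header */
	#include <asm/special_insns.h>	/* read_cr3(), write_cr3() */
	#include <asm/tlbflush.h>	/* __flush_tlb_all() */

	/*
	 * Hypothetical helpers; assumes interrupts are disabled and a
	 * single caller, so one saved CR3 value is enough.
	 */
	static unsigned long saved_cr3;

	static void enter_identity_pgd(void)
	{
		saved_cr3 = read_cr3();
		/* trampoline_pgd is a physical address, usable as CR3. */
		write_cr3(real_mode_header->trampoline_pgd);
		__flush_tlb_all();
	}

	static void leave_identity_pgd(void)
	{
		write_cr3(saved_cr3);
		__flush_tlb_all();
	}

If trampoline_pgd covered only kernel text and modules, any access to an
ioremap()'d device region between enter_identity_pgd() and
leave_identity_pgd() would fault, which is exactly the failure mode the
broken firmware triggers.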
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/init_64.c	9
-rw-r--r--	arch/x86/mm/ioremap.c	105
2 files changed, 113 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3c8beb..fd4404f19d39 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		struct page *page;
+		pgd_t *pgd;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -130,6 +130,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 			spin_unlock(pgt_lock);
 		}
+
+		pgd = __va(real_mode_header->trampoline_pgd);
+		pgd += pgd_index(address);
+
+		if (pgd_none(*pgd))
+			set_pgd(pgd, *pgd_ref);
+
 		spin_unlock(&pgd_lock);
 	}
 }
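
The hunk above keeps trampoline_pgd in step with the live kernel page
tables: whenever sync_global_pgds() propagates a new top-level entry,
the matching trampoline_pgd slot receives a copy. As a rough standalone
illustration (the constants below are the x86-64 4-level paging values,
assumed to match the kernel's), pgd_index() simply selects the top-level
slot for a given virtual address:

	#include <stdio.h>

	/* x86-64 4-level paging constants (assumed). */
	#define PGDIR_SHIFT	39
	#define PTRS_PER_PGD	512

	static unsigned long pgd_index(unsigned long addr)
	{
		return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	}

	int main(void)
	{
		/* Direct-mapping base (PAGE_OFFSET) on x86-64 at the time. */
		unsigned long page_offset = 0xffff880000000000UL;

		/* Prints 272: the PGD slot sync_global_pgds() would copy. */
		printf("pgd_index(PAGE_OFFSET) = %lu\n", pgd_index(page_offset));
		return 0;
	}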
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f..e190f7b56653 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -50,6 +50,107 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 	return err;
 }
 
+#ifdef CONFIG_X86_64
+static void ident_pte_range(unsigned long paddr, unsigned long vaddr,
+			    pmd_t *ppmd, pmd_t *vpmd, unsigned long end)
+{
+	pte_t *ppte = pte_offset_kernel(ppmd, paddr);
+	pte_t *vpte = pte_offset_kernel(vpmd, vaddr);
+
+	do {
+		set_pte(ppte, *vpte);
+	} while (ppte++, vpte++, vaddr += PAGE_SIZE, vaddr != end);
+}
+
+static int ident_pmd_range(unsigned long paddr, unsigned long vaddr,
+			   pud_t *ppud, pud_t *vpud, unsigned long end)
+{
+	pmd_t *ppmd = pmd_offset(ppud, paddr);
+	pmd_t *vpmd = pmd_offset(vpud, vaddr);
+	unsigned long next;
+
+	do {
+		next = pmd_addr_end(vaddr, end);
+
+		if (!pmd_present(*ppmd)) {
+			pte_t *ppte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+			if (!ppte)
+				return 1;
+
+			set_pmd(ppmd, __pmd(_KERNPG_TABLE | __pa(ppte)));
+		}
+
+		ident_pte_range(paddr, vaddr, ppmd, vpmd, next);
+	} while (ppmd++, vpmd++, vaddr = next, vaddr != end);
+
+	return 0;
+}
+
+static int ident_pud_range(unsigned long paddr, unsigned long vaddr,
+			   pgd_t *ppgd, pgd_t *vpgd, unsigned long end)
+{
+	pud_t *ppud = pud_offset(ppgd, paddr);
+	pud_t *vpud = pud_offset(vpgd, vaddr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(vaddr, end);
+
+		if (!pud_present(*ppud)) {
+			pmd_t *ppmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+			if (!ppmd)
+				return 1;
+
+			set_pud(ppud, __pud(_KERNPG_TABLE | __pa(ppmd)));
+		}
+
+		if (ident_pmd_range(paddr, vaddr, ppud, vpud, next))
+			return 1;
+	} while (ppud++, vpud++, vaddr = next, vaddr != end);
+
+	return 0;
+}
+
+static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr,
+				   unsigned long size)
+{
+	unsigned long end = vaddr + size;
+	unsigned long next;
+	pgd_t *vpgd, *ppgd;
+
+	/* Don't map over the guard hole. */
+	if (paddr >= 0x800000000000 || paddr + size > 0x800000000000)
+		return 1;
+
+	ppgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr);
+
+	vpgd = pgd_offset_k(vaddr);
+	do {
+		next = pgd_addr_end(vaddr, end);
+
+		if (!pgd_present(*ppgd)) {
+			pud_t *ppud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+			if (!ppud)
+				return 1;
+
+			set_pgd(ppgd, __pgd(_KERNPG_TABLE | __pa(ppud)));
+		}
+
+		if (ident_pud_range(paddr, vaddr, ppgd, vpgd, next))
+			return 1;
+	} while (ppgd++, vpgd++, vaddr = next, vaddr != end);
+
+	return 0;
+}
+#else
+static inline int insert_identity_mapping(resource_size_t paddr,
+					  unsigned long vaddr,
+					  unsigned long size)
+{
+	return 0;
+}
+#endif /* CONFIG_X86_64 */
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -163,6 +264,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	ret_addr = (void __iomem *) (vaddr + offset);
 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
+	if (insert_identity_mapping(phys_addr, vaddr, size))
+		printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n",
+			(unsigned long long)phys_addr);
+
 	/*
 	 * Check if the request spans more than any BAR in the iomem resource
 	 * tree.
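
For context, the change is invisible to ioremap() callers: a driver
mapping an MMIO region does nothing new, and the identity mapping is
installed as a side effect inside __ioremap_caller(). A minimal sketch
(the device name and physical address below are hypothetical):

	#include <linux/errno.h>
	#include <linux/io.h>

	/* Hypothetical MMIO region, purely for illustration. */
	#define MY_DEV_PHYS	0xfed00000UL
	#define MY_DEV_SIZE	0x1000UL

	static void __iomem *my_dev_regs;

	static int my_dev_map(void)
	{
		/*
		 * On x86-64, this now also inserts a 1:1 mapping of
		 * MY_DEV_PHYS into trampoline_pgd via
		 * insert_identity_mapping().
		 */
		my_dev_regs = ioremap(MY_DEV_PHYS, MY_DEV_SIZE);
		if (!my_dev_regs)
			return -ENOMEM;

		return 0;
	}

Note that insert_identity_mapping() failure is deliberately non-fatal:
the mapping only matters to firmware that later runs on trampoline_pgd,
so __ioremap_caller() just logs a warning and returns the usual virtual
mapping.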