author     Keith Mannthey <kmannth@us.ibm.com>       2006-09-26 04:52:36 -0400
committer  Andi Kleen <andi@basil.nowhere.org>       2006-09-26 04:52:36 -0400
commit     6ad916581181a105d7832a7dec9e1eb58f7a1621 (patch)
tree       624409543fae127a0bc5a6267011f969fbaf03df /arch
parent     abf0f10948b316b577851ef21c728341f1046552 (diff)
[PATCH] x86_64 kernel mapping fix
Fix for the x86_64 kernel mapping code. Without this patch the update path
only initializes one pmd_page worth of memory and tramples any existing
entries on it. The calling convention for phys_pmd_init and phys_pud_init is
now to always pass a [pmd/pud] page, not an offset within a page.
Signed-off-by: Keith Mannthey <kmannth@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
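
To make the convention change concrete, below is a minimal standalone sketch
of the new phys_pmd_init walk. This is toy user-space code, not kernel code:
the table size, the pmd_index() stand-in, plain unsigned long entries, and the
name toy_phys_pmd_init are all simplifications for illustration. It shows the
point of the patch: the callee receives the page base, derives the slot from
the address, and skips populated slots instead of overwriting them.

#include <stdio.h>

#define PTRS_PER_PMD 8UL  /* toy table size; the real pmd page has 512 slots */
#define PMD_SIZE     1UL  /* toy granularity of one slot */

/* toy stand-in for the kernel's pmd_index(address) */
static unsigned long pmd_index(unsigned long address)
{
        return address % PTRS_PER_PMD;
}

/*
 * New convention, as in the patch: the caller hands over the page base
 * (pmd_page) rather than a pointer offset into it. The old loop started
 * at the passed-in pointer with i = 0 and wrote every slot unconditionally,
 * trampling entries that were already set up.
 */
static void toy_phys_pmd_init(unsigned long *pmd_page,
                              unsigned long address, unsigned long end)
{
        unsigned long i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long *pmd = pmd_page + pmd_index(address);

                if (address >= end)
                        break;
                if (*pmd)        /* mirrors "if (pmd_val(*pmd)) continue;" */
                        continue;
                *pmd = address;  /* stand-in for set_pmd(pmd, __pmd(entry)) */
        }
}

int main(void)
{
        /* slots 0-1 already "mapped" by an earlier pass (nonzero = populated) */
        unsigned long page[PTRS_PER_PMD] = { 100, 101 };

        toy_phys_pmd_init(page, 2, PTRS_PER_PMD);  /* update from slot 2 on */

        for (int i = 0; i < (int)PTRS_PER_PMD; i++)
                printf("slot %d: %lu\n", i, page[i]);  /* 0-1 stay 100, 101 */
        return 0;
}

Running this prints slots 0 and 1 unchanged and slots 2-7 freshly filled,
which is exactly the behavior the trampling bug broke.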
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86_64/mm/init.c  51
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index d40134bd6399..984155b75e4c 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -250,12 +250,13 @@ __init void early_iounmap(void *addr, unsigned long size)
 }
 
 static void __meminit
-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
-	int i;
+	int i = pmd_index(address);
 
-	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
 		unsigned long entry;
+		pmd_t *pmd = pmd_page + pmd_index(address);
 
 		if (address >= end) {
 			if (!after_bootmem)
@@ -263,6 +264,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 				set_pmd(pmd, __pmd(0));
 			break;
 		}
+
+		if (pmd_val(*pmd))
+			continue;
+
 		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
 		entry &= __supported_pte_mask;
 		set_pmd(pmd, __pmd(entry));
@@ -272,45 +277,41 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 static void __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-
-	if (pmd_none(*pmd)) {
-		spin_lock(&init_mm.page_table_lock);
-		phys_pmd_init(pmd, address, end);
-		spin_unlock(&init_mm.page_table_lock);
-		__flush_tlb_all();
-	}
+	pmd_t *pmd = pmd_offset(pud,0);
+	spin_lock(&init_mm.page_table_lock);
+	phys_pmd_init(pmd, address, end);
+	spin_unlock(&init_mm.page_table_lock);
+	__flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
-	long i = pud_index(address);
-
-	pud = pud + i;
+	int i = pud_index(addr);
 
-	if (after_bootmem && pud_val(*pud)) {
-		phys_pmd_update(pud, address, end);
-		return;
-	}
 
-	for (; i < PTRS_PER_PUD; pud++, i++) {
+	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
 		int map;
-		unsigned long paddr, pmd_phys;
+		unsigned long pmd_phys;
+		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
 
-		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-		if (paddr >= end)
+		if (addr >= end)
 			break;
 
-		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
+		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
 
+		if (pud_val(*pud)) {
+			phys_pmd_update(pud, addr, end);
+			continue;
+		}
+
 		pmd = alloc_low_page(&map, &pmd_phys);
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-		phys_pmd_init(pmd, paddr, end);
+		phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
 		unmap_low_page(map);
 	}
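
Reading the new flow as a whole: the update path now makes its reuse-or-allocate
decision per pud entry. A populated entry is handed to phys_pmd_update(), which
takes init_mm.page_table_lock, lets phys_pmd_init() fill only the empty pmd
slots, and flushes the TLB; only an empty entry gets a fresh pmd page via
alloc_low_page(). That per-entry check, together with the pmd-level
"if (pmd_val(*pmd)) continue;", is what keeps a later mapping update from
trampling entries established by an earlier pass.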