about | summary | refs | log | tree | commit | diff | stats
path: root/arch
diff options
context:
space:
mode:
author: Jeremy Fitzhardinge <jeremy@goop.org>  2008-01-30 07:33:39 -0500
committer: Ingo Molnar <mingo@elte.hu>  2008-01-30 07:33:39 -0500
commit: 6c435456dc91ace468b4e9d72ad0e13dafa22a45 (patch)
tree: febb56da4b1acb6dfae230290ce5638d3fbcaaab /arch
parent: a89780f3b84f9a421e2608580b55f12b3ac4f9c2 (diff)
x86: add mm parameter to paravirt_alloc_pd
Add mm to paravirt_alloc_pd, partly to make it consistent with
paravirt_alloc_pt, and because later changes will make use of it.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/vmi_32.c   | 2
-rw-r--r--  arch/x86/mm/init_32.c      | 4
-rw-r--r--  arch/x86/mm/pgtable_32.c   | 4
3 files changed, 6 insertions, 4 deletions
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 2ee5d8e0ada5..4525bc2c2e19 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -398,7 +398,7 @@ static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
 }
 
-static void vmi_allocate_pd(u32 pfn)
+static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
 {
 	/*
 	 * This call comes in very early, before mem_map is setup.
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 04332c09ad1d..98d2acae4f64 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -65,7 +65,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
+		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
 		if (pmd_table != pmd_offset(pud, 0))
@@ -365,7 +365,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
 	memset(&base[USER_PTRS_PER_PGD], 0,
 	       KERNEL_PGD_PTRS * sizeof(pgd_t));
 #else
-	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
+	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
 #endif
 }
 
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index be61a1d845a4..f85ee44720d2 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -330,13 +330,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;
 
+	mm->pgd = pgd;			/* so that alloc_pd can use it */
+
 	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
 		pmd_t *pmd = pmd_cache_alloc(i);
 
 		if (!pmd)
 			goto out_oom;
 
-		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
+		paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
 	}
 	return pgd;