author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-03-17 19:37:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-24 17:57:31 -0400
commit	6944a9c8945212a0cc1de3589736d59ec542c539 (patch)
tree	3234ea6c8690c9b69d91ab9c9bd3adb0beb4e976
parent	394158559d4c912cc58c311b6346cdea0ed2b1de (diff)
x86: rename paravirt_alloc_pt etc after the pagetable structure
Rename (alloc|release)_(pt|pd) to pte/pmd to explicitly match the name of
the appropriate pagetable level structure.

[ x86.git merge work by Mark McLoughlin <markmc@redhat.com> ]

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/kernel/paravirt.c	10
-rw-r--r--	arch/x86/kernel/vmi_32.c	20
-rw-r--r--	arch/x86/mm/init_32.c	6
-rw-r--r--	arch/x86/mm/ioremap.c	2
-rw-r--r--	arch/x86/mm/pageattr.c	2
-rw-r--r--	arch/x86/mm/pgtable.c	18
-rw-r--r--	arch/x86/xen/enlighten.c	32
-rw-r--r--	include/asm-x86/paravirt.h	32
-rw-r--r--	include/asm-x86/pgalloc.h	16
9 files changed, 69 insertions(+), 69 deletions(-)
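Note on the naming, before the per-file diffs: the old hook names used the 32-bit non-PAE terms "pt" (page table) and "pd" (page directory), while the kernel's pagetable level structures are called pte/pmd/pud/pgd. The sketch below is a minimal, hypothetical userspace model of the renamed hooks, not kernel code: pv_mmu_ops_sketch and nop() are stand-ins for the real struct pv_mmu_ops in include/asm-x86/paravirt.h and for paravirt_nop, kept only to show which hook now corresponds to which pagetable level.

/* Minimal userspace sketch of the renamed hooks; a stand-in for the real
 * struct pv_mmu_ops, not the kernel code itself. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct pv_mmu_ops_sketch {
	/* Hooks for allocating/releasing pagetable pages; each name now
	 * matches the pagetable level structure being allocated. */
	void (*alloc_pte)(u32 pfn);	/* formerly alloc_pt   */
	void (*alloc_pmd)(u32 pfn);	/* formerly alloc_pd   */
	void (*release_pte)(u32 pfn);	/* formerly release_pt */
	void (*release_pmd)(u32 pfn);	/* formerly release_pd */
};

static void nop(u32 pfn) { (void)pfn; }	/* stand-in for paravirt_nop */

int main(void)
{
	struct pv_mmu_ops_sketch ops = {
		.alloc_pte = nop, .alloc_pmd = nop,
		.release_pte = nop, .release_pmd = nop,
	};

	/* A page installed as a pte table goes through the pte-level hooks;
	 * a page installed as a pmd goes through the pmd-level hooks. */
	ops.alloc_pte(0x1000);
	ops.release_pte(0x1000);
	ops.alloc_pmd(0x2000);
	ops.release_pmd(0x2000);
	printf("pte/pmd hooks exercised\n");
	return 0;
}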
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 3733412d1357..362653da003f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -366,11 +366,11 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.flush_tlb_single = native_flush_tlb_single,
 	.flush_tlb_others = native_flush_tlb_others,
 
-	.alloc_pt = paravirt_nop,
-	.alloc_pd = paravirt_nop,
-	.alloc_pd_clone = paravirt_nop,
-	.release_pt = paravirt_nop,
-	.release_pd = paravirt_nop,
+	.alloc_pte = paravirt_nop,
+	.alloc_pmd = paravirt_nop,
+	.alloc_pmd_clone = paravirt_nop,
+	.release_pte = paravirt_nop,
+	.release_pmd = paravirt_nop,
 
 	.set_pte = native_set_pte,
 	.set_pte_at = native_set_pte_at,
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 12affe1f9bce..44f7ca153b71 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -392,13 +392,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
-static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
+static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
 }
 
-static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
+static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
 {
 	/*
 	 * This call comes in very early, before mem_map is setup.
@@ -409,20 +409,20 @@ static void vmi_allocate_pd(struct mm_struct *mm, u32 pfn)
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
 }
 
-static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
+static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
 	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
 }
 
-static void vmi_release_pt(u32 pfn)
+static void vmi_release_pte(u32 pfn)
 {
 	vmi_ops.release_page(pfn, VMI_PAGE_L1);
 	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
 }
 
-static void vmi_release_pd(u32 pfn)
+static void vmi_release_pmd(u32 pfn)
 {
 	vmi_ops.release_page(pfn, VMI_PAGE_L2);
 	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
@@ -871,15 +871,15 @@ static inline int __init activate_vmi(void)
 
 	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
 	if (vmi_ops.allocate_page) {
-		pv_mmu_ops.alloc_pt = vmi_allocate_pt;
-		pv_mmu_ops.alloc_pd = vmi_allocate_pd;
-		pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+		pv_mmu_ops.alloc_pte = vmi_allocate_pte;
+		pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
+		pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
 	}
 
 	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 	if (vmi_ops.release_page) {
-		pv_mmu_ops.release_pt = vmi_release_pt;
-		pv_mmu_ops.release_pd = vmi_release_pd;
+		pv_mmu_ops.release_pte = vmi_release_pte;
+		pv_mmu_ops.release_pmd = vmi_release_pmd;
 	}
 
 	/* Set linear is needed in all cases */
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9ec62da85fd7..df490905f377 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -71,7 +71,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
 		BUG_ON(pmd_table != pmd_offset(pud, 0));
@@ -100,7 +100,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 		}
 
-		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
+		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
@@ -365,7 +365,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
 
 		pte_clear(NULL, va, pte);
 	}
-	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
+	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
 }
 
 void __init native_pagetable_setup_done(pgd_t *base)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 3a4baf95e24d..36a3f7ded626 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -407,7 +407,7 @@ void __init early_ioremap_clear(void)
 
 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
 	pmd_clear(pmd);
-	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
+	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
 	__flush_tlb_all();
 }
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 938130d49b76..57e762c141f7 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -483,7 +483,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 		goto out_unlock;
 
 	pbase = (pte_t *)page_address(base);
-	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
+	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 0d2866b8f425..1d44d6dd4c9f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -24,14 +24,14 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
 	pgtable_page_dtor(pte);
-	paravirt_release_pt(page_to_pfn(pte));
+	paravirt_release_pte(page_to_pfn(pte));
 	tlb_remove_page(tlb, pte);
 }
 
 #if PAGETABLE_LEVELS > 2
 void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
-	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
@@ -122,10 +122,10 @@ static void pgd_ctor(void *p)
 		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 				swapper_pg_dir + USER_PTRS_PER_PGD,
 				KERNEL_PGD_PTRS);
-		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
 					__pa(swapper_pg_dir) >> PAGE_SHIFT,
 					USER_PTRS_PER_PGD,
 					KERNEL_PGD_PTRS);
 	}
 
 	/* list required to sync kernel mapping updates */
@@ -166,7 +166,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 
 		pgdp[i] = native_make_pgd(0);
 
-		paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
+		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
 		pmd_free(mm, pmd);
 	}
 }
@@ -211,7 +211,7 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
-	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
+	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
 
 	/* Note: almost everything apart from _PAGE_PRESENT is
 	   reserved at the pmd (PDPT) level. */
@@ -242,7 +242,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 
-	/* so that alloc_pd can use it */
+	/* so that alloc_pmd can use it */
 	mm->pgd = pgd;
 	if (pgd)
 		pgd_ctor(pgd);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0388220cf97..36f36e6b0874 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -655,15 +655,15 @@ static void xen_write_cr3(unsigned long cr3)
 
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
-static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
 {
 	BUG_ON(mem_map);	/* should only be used early */
 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
 }
 
-/* Early release_pt assumes that all pts are pinned, since there's
+/* Early release_pte assumes that all pts are pinned, since there's
    only init_mm and anything attached to that is pinned. */
-static void xen_release_pt_init(u32 pfn)
+static void xen_release_pte_init(u32 pfn)
 {
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
@@ -697,12 +697,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 	}
 }
 
-static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_pte(struct mm_struct *mm, u32 pfn)
 {
 	xen_alloc_ptpage(mm, pfn, PT_PTE);
 }
 
-static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn)
 {
 	xen_alloc_ptpage(mm, pfn, PT_PMD);
 }
@@ -722,12 +722,12 @@ static void xen_release_ptpage(u32 pfn, unsigned level)
 	}
 }
 
-static void xen_release_pt(u32 pfn)
+static void xen_release_pte(u32 pfn)
 {
 	xen_release_ptpage(pfn, PT_PTE);
 }
 
-static void xen_release_pd(u32 pfn)
+static void xen_release_pmd(u32 pfn)
 {
 	xen_release_ptpage(pfn, PT_PMD);
 }
@@ -849,10 +849,10 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
 {
 	/* This will work as long as patching hasn't happened yet
 	   (which it hasn't) */
-	pv_mmu_ops.alloc_pt = xen_alloc_pt;
-	pv_mmu_ops.alloc_pd = xen_alloc_pd;
-	pv_mmu_ops.release_pt = xen_release_pt;
-	pv_mmu_ops.release_pd = xen_release_pd;
+	pv_mmu_ops.alloc_pte = xen_alloc_pte;
+	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+	pv_mmu_ops.release_pte = xen_release_pte;
+	pv_mmu_ops.release_pmd = xen_release_pmd;
 	pv_mmu_ops.set_pte = xen_set_pte;
 
 	setup_shared_info();
@@ -1059,11 +1059,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.pte_update = paravirt_nop,
 	.pte_update_defer = paravirt_nop,
 
-	.alloc_pt = xen_alloc_pt_init,
-	.release_pt = xen_release_pt_init,
-	.alloc_pd = xen_alloc_pt_init,
-	.alloc_pd_clone = paravirt_nop,
-	.release_pd = xen_release_pt_init,
+	.alloc_pte = xen_alloc_pte_init,
+	.release_pte = xen_release_pte_init,
+	.alloc_pmd = xen_alloc_pte_init,
+	.alloc_pmd_clone = paravirt_nop,
+	.release_pmd = xen_release_pte_init,
 
 #ifdef CONFIG_HIGHPTE
 	.kmap_atomic_pte = xen_kmap_atomic_pte,
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 3d419398499b..c4480b9bda5d 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -220,11 +220,11 @@ struct pv_mmu_ops {
 			    unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pd)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-	void (*release_pt)(u32 pfn);
-	void (*release_pd)(u32 pfn);
+	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
+	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
+	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+	void (*release_pte)(u32 pfn);
+	void (*release_pmd)(u32 pfn);
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -910,28 +910,28 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pt(unsigned pfn)
+static inline void paravirt_release_pte(unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
+	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
+static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
 					    unsigned start, unsigned count)
 {
-	PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
+	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
+	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
 }
 
 #ifdef CONFIG_HIGHPTE
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 73e5b0318476..a25d54029874 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -8,11 +8,11 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_alloc_pt(mm, pfn) do { } while (0)
-#define paravirt_alloc_pd(mm, pfn) do { } while (0)
-#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
-#define paravirt_release_pt(pfn) do { } while (0)
-#define paravirt_release_pd(pfn) do { } while (0)
+#define paravirt_alloc_pte(mm, pfn) do { } while (0)
+#define paravirt_alloc_pmd(mm, pfn) do { } while (0)
+#define paravirt_alloc_pmd_clone(pfn, clonepfn, start, count) do { } while (0)
+#define paravirt_release_pte(pfn) do { } while (0)
+#define paravirt_release_pmd(pfn) do { } while (0)
 #endif
 
 /*
@@ -43,7 +43,7 @@ extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
 {
-	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
+	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
 	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
 }
 
@@ -52,7 +52,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 {
 	unsigned long pfn = page_to_pfn(pte);
 
-	paravirt_alloc_pt(mm, pfn);
+	paravirt_alloc_pte(mm, pfn);
 	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
 }
 
@@ -77,7 +77,7 @@ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
 #else	/* !CONFIG_X86_PAE */
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
+	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
 	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
 }
 #endif	/* CONFIG_X86_PAE */