diff options
| author | Eduardo Habkost <ehabkost@redhat.com> | 2008-07-30 17:32:27 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-08-21 23:34:44 -0400 |
| commit | f86399396ce7a4f4069828b7dceac5aa5113dfb5 (patch) | |
| tree | 2f5140fc4acdc33d615cd776bf212e310dc9345b | |
| parent | 6e86841d05f371b5b9b86ce76c02aaee83352298 (diff) | |
x86, paravirt_ops: use unsigned long instead of u32 for alloc_p*() pfn args
This patch changes the pfn args from 'u32' to 'unsigned long'
on alloc_p*() functions on paravirt_ops, and the corresponding
implementations for Xen and VMI. The prototypes for CONFIG_PARAVIRT=n
are already using unsigned long, so paravirt.h now matches the prototypes
on asm-x86/pgalloc.h.
It shouldn't result in any changes on generated code on 32-bit, with
or without CONFIG_PARAVIRT. In both cases, 'codiff -f' didn't show any
change after applying this patch.
On 64-bit, there are (expected) binary changes only when CONFIG_PARAVIRT
is enabled, as the patch is really supposed to change the size of the
pfn args.
[ v2: KVM_GUEST: use the right parameter type on kvm_release_pt() ]
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Acked-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | arch/x86/kernel/kvm.c | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/vmi_32.c | 10 | ||||
| -rw-r--r-- | arch/x86/xen/enlighten.c | 20 | ||||
| -rw-r--r-- | include/asm-x86/paravirt.h | 30 |
4 files changed, 31 insertions, 31 deletions
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 8b7a3cf37d2b..478bca986eca 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
| @@ -178,7 +178,7 @@ static void kvm_flush_tlb(void) | |||
| 178 | kvm_deferred_mmu_op(&ftlb, sizeof ftlb); | 178 | kvm_deferred_mmu_op(&ftlb, sizeof ftlb); |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | static void kvm_release_pt(u32 pfn) | 181 | static void kvm_release_pt(unsigned long pfn) |
| 182 | { | 182 | { |
| 183 | struct kvm_mmu_op_release_pt rpt = { | 183 | struct kvm_mmu_op_release_pt rpt = { |
| 184 | .header.op = KVM_MMU_OP_RELEASE_PT, | 184 | .header.op = KVM_MMU_OP_RELEASE_PT, |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 0a1b1a9d922d..340b03643b95 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
| @@ -392,13 +392,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type) | |||
| 392 | } | 392 | } |
| 393 | #endif | 393 | #endif |
| 394 | 394 | ||
| 395 | static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn) | 395 | static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn) |
| 396 | { | 396 | { |
| 397 | vmi_set_page_type(pfn, VMI_PAGE_L1); | 397 | vmi_set_page_type(pfn, VMI_PAGE_L1); |
| 398 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); | 398 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); |
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) | 401 | static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn) |
| 402 | { | 402 | { |
| 403 | /* | 403 | /* |
| 404 | * This call comes in very early, before mem_map is setup. | 404 | * This call comes in very early, before mem_map is setup. |
| @@ -409,20 +409,20 @@ static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) | |||
| 409 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); | 409 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); |
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) | 412 | static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count) |
| 413 | { | 413 | { |
| 414 | vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); | 414 | vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); |
| 415 | vmi_check_page_type(clonepfn, VMI_PAGE_L2); | 415 | vmi_check_page_type(clonepfn, VMI_PAGE_L2); |
| 416 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); | 416 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); |
| 417 | } | 417 | } |
| 418 | 418 | ||
| 419 | static void vmi_release_pte(u32 pfn) | 419 | static void vmi_release_pte(unsigned long pfn) |
| 420 | { | 420 | { |
| 421 | vmi_ops.release_page(pfn, VMI_PAGE_L1); | 421 | vmi_ops.release_page(pfn, VMI_PAGE_L1); |
| 422 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | 422 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); |
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | static void vmi_release_pmd(u32 pfn) | 425 | static void vmi_release_pmd(unsigned long pfn) |
| 426 | { | 426 | { |
| 427 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | 427 | vmi_ops.release_page(pfn, VMI_PAGE_L2); |
| 428 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | 428 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 9ff6e3cbf08f..db970bdc5e3d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -812,7 +812,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) | |||
| 812 | 812 | ||
| 813 | /* Early in boot, while setting up the initial pagetable, assume | 813 | /* Early in boot, while setting up the initial pagetable, assume |
| 814 | everything is pinned. */ | 814 | everything is pinned. */ |
| 815 | static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) | 815 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
| 816 | { | 816 | { |
| 817 | #ifdef CONFIG_FLATMEM | 817 | #ifdef CONFIG_FLATMEM |
| 818 | BUG_ON(mem_map); /* should only be used early */ | 818 | BUG_ON(mem_map); /* should only be used early */ |
| @@ -822,7 +822,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) | |||
| 822 | 822 | ||
| 823 | /* Early release_pte assumes that all pts are pinned, since there's | 823 | /* Early release_pte assumes that all pts are pinned, since there's |
| 824 | only init_mm and anything attached to that is pinned. */ | 824 | only init_mm and anything attached to that is pinned. */ |
| 825 | static void xen_release_pte_init(u32 pfn) | 825 | static void xen_release_pte_init(unsigned long pfn) |
| 826 | { | 826 | { |
| 827 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 827 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
| 828 | } | 828 | } |
| @@ -838,7 +838,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) | |||
| 838 | 838 | ||
| 839 | /* This needs to make sure the new pte page is pinned iff its being | 839 | /* This needs to make sure the new pte page is pinned iff its being |
| 840 | attached to a pinned pagetable. */ | 840 | attached to a pinned pagetable. */ |
| 841 | static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) | 841 | static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) |
| 842 | { | 842 | { |
| 843 | struct page *page = pfn_to_page(pfn); | 843 | struct page *page = pfn_to_page(pfn); |
| 844 | 844 | ||
| @@ -856,12 +856,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) | |||
| 856 | } | 856 | } |
| 857 | } | 857 | } |
| 858 | 858 | ||
| 859 | static void xen_alloc_pte(struct mm_struct *mm, u32 pfn) | 859 | static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) |
| 860 | { | 860 | { |
| 861 | xen_alloc_ptpage(mm, pfn, PT_PTE); | 861 | xen_alloc_ptpage(mm, pfn, PT_PTE); |
| 862 | } | 862 | } |
| 863 | 863 | ||
| 864 | static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn) | 864 | static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) |
| 865 | { | 865 | { |
| 866 | xen_alloc_ptpage(mm, pfn, PT_PMD); | 866 | xen_alloc_ptpage(mm, pfn, PT_PMD); |
| 867 | } | 867 | } |
| @@ -909,7 +909,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 909 | } | 909 | } |
| 910 | 910 | ||
| 911 | /* This should never happen until we're OK to use struct page */ | 911 | /* This should never happen until we're OK to use struct page */ |
| 912 | static void xen_release_ptpage(u32 pfn, unsigned level) | 912 | static void xen_release_ptpage(unsigned long pfn, unsigned level) |
| 913 | { | 913 | { |
| 914 | struct page *page = pfn_to_page(pfn); | 914 | struct page *page = pfn_to_page(pfn); |
| 915 | 915 | ||
| @@ -923,23 +923,23 @@ static void xen_release_ptpage(u32 pfn, unsigned level) | |||
| 923 | } | 923 | } |
| 924 | } | 924 | } |
| 925 | 925 | ||
| 926 | static void xen_release_pte(u32 pfn) | 926 | static void xen_release_pte(unsigned long pfn) |
| 927 | { | 927 | { |
| 928 | xen_release_ptpage(pfn, PT_PTE); | 928 | xen_release_ptpage(pfn, PT_PTE); |
| 929 | } | 929 | } |
| 930 | 930 | ||
| 931 | static void xen_release_pmd(u32 pfn) | 931 | static void xen_release_pmd(unsigned long pfn) |
| 932 | { | 932 | { |
| 933 | xen_release_ptpage(pfn, PT_PMD); | 933 | xen_release_ptpage(pfn, PT_PMD); |
| 934 | } | 934 | } |
| 935 | 935 | ||
| 936 | #if PAGETABLE_LEVELS == 4 | 936 | #if PAGETABLE_LEVELS == 4 |
| 937 | static void xen_alloc_pud(struct mm_struct *mm, u32 pfn) | 937 | static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) |
| 938 | { | 938 | { |
| 939 | xen_alloc_ptpage(mm, pfn, PT_PUD); | 939 | xen_alloc_ptpage(mm, pfn, PT_PUD); |
| 940 | } | 940 | } |
| 941 | 941 | ||
| 942 | static void xen_release_pud(u32 pfn) | 942 | static void xen_release_pud(unsigned long pfn) |
| 943 | { | 943 | { |
| 944 | xen_release_ptpage(pfn, PT_PUD); | 944 | xen_release_ptpage(pfn, PT_PUD); |
| 945 | } | 945 | } |
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index fbbde93f12d6..497aea0f41ac 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
| @@ -257,13 +257,13 @@ struct pv_mmu_ops { | |||
| 257 | * Hooks for allocating/releasing pagetable pages when they're | 257 | * Hooks for allocating/releasing pagetable pages when they're |
| 258 | * attached to a pagetable | 258 | * attached to a pagetable |
| 259 | */ | 259 | */ |
| 260 | void (*alloc_pte)(struct mm_struct *mm, u32 pfn); | 260 | void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); |
| 261 | void (*alloc_pmd)(struct mm_struct *mm, u32 pfn); | 261 | void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); |
| 262 | void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); | 262 | void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); |
| 263 | void (*alloc_pud)(struct mm_struct *mm, u32 pfn); | 263 | void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); |
| 264 | void (*release_pte)(u32 pfn); | 264 | void (*release_pte)(unsigned long pfn); |
| 265 | void (*release_pmd)(u32 pfn); | 265 | void (*release_pmd)(unsigned long pfn); |
| 266 | void (*release_pud)(u32 pfn); | 266 | void (*release_pud)(unsigned long pfn); |
| 267 | 267 | ||
| 268 | /* Pagetable manipulation functions */ | 268 | /* Pagetable manipulation functions */ |
| 269 | void (*set_pte)(pte_t *ptep, pte_t pteval); | 269 | void (*set_pte)(pte_t *ptep, pte_t pteval); |
| @@ -993,35 +993,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 993 | PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd); | 993 | PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd); |
| 994 | } | 994 | } |
| 995 | 995 | ||
| 996 | static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn) | 996 | static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) |
| 997 | { | 997 | { |
| 998 | PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn); | 998 | PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn); |
| 999 | } | 999 | } |
| 1000 | static inline void paravirt_release_pte(unsigned pfn) | 1000 | static inline void paravirt_release_pte(unsigned long pfn) |
| 1001 | { | 1001 | { |
| 1002 | PVOP_VCALL1(pv_mmu_ops.release_pte, pfn); | 1002 | PVOP_VCALL1(pv_mmu_ops.release_pte, pfn); |
| 1003 | } | 1003 | } |
| 1004 | 1004 | ||
| 1005 | static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn) | 1005 | static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) |
| 1006 | { | 1006 | { |
| 1007 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); | 1007 | PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn); |
| 1008 | } | 1008 | } |
| 1009 | 1009 | ||
| 1010 | static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn, | 1010 | static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn, |
| 1011 | unsigned start, unsigned count) | 1011 | unsigned long start, unsigned long count) |
| 1012 | { | 1012 | { |
| 1013 | PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); | 1013 | PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count); |
| 1014 | } | 1014 | } |
| 1015 | static inline void paravirt_release_pmd(unsigned pfn) | 1015 | static inline void paravirt_release_pmd(unsigned long pfn) |
| 1016 | { | 1016 | { |
| 1017 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); | 1017 | PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn); |
| 1018 | } | 1018 | } |
| 1019 | 1019 | ||
| 1020 | static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn) | 1020 | static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) |
| 1021 | { | 1021 | { |
| 1022 | PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn); | 1022 | PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn); |
| 1023 | } | 1023 | } |
| 1024 | static inline void paravirt_release_pud(unsigned pfn) | 1024 | static inline void paravirt_release_pud(unsigned long pfn) |
| 1025 | { | 1025 | { |
| 1026 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); | 1026 | PVOP_VCALL1(pv_mmu_ops.release_pud, pfn); |
| 1027 | } | 1027 | } |
