diff options
author | H. Peter Anvin <hpa@zytor.com> | 2008-12-23 13:10:40 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-12-24 04:40:19 -0500 |
commit | c1c15b65ec30275575dac9322aae607075769fbc (patch) | |
tree | d7e48801291eb6d3f70b05e6e8ffa2fe75ebae3c | |
parent | 67bac792cd0c05b4b6e0393c32605b028b8dd533 (diff) |
x86: PAT: fix address types in track_pfn_vma_new()
Impact: cleanup, fix warning
This warning:
arch/x86/mm/pat.c: In function 'track_pfn_vma_copy':
arch/x86/mm/pat.c:701: warning: passing argument 5 of 'follow_phys' from incompatible pointer type
Triggers because physical addresses are resource_size_t, not u64.
This really matters when calling an interface like follow_phys() which
takes a pointer to a physical address -- although on x86, being
little-endian, it would generally work anyway as long as the memory region
wasn't completely uninitialized.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/mm/pat.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 541bcc944a5b..85cbd3cd3723 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -684,7 +684,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) | |||
684 | { | 684 | { |
685 | int retval = 0; | 685 | int retval = 0; |
686 | unsigned long i, j; | 686 | unsigned long i, j; |
687 | u64 paddr; | 687 | resource_size_t paddr; |
688 | unsigned long prot; | 688 | unsigned long prot; |
689 | unsigned long vma_start = vma->vm_start; | 689 | unsigned long vma_start = vma->vm_start; |
690 | unsigned long vma_end = vma->vm_end; | 690 | unsigned long vma_end = vma->vm_end; |
@@ -746,8 +746,8 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | |||
746 | { | 746 | { |
747 | int retval = 0; | 747 | int retval = 0; |
748 | unsigned long i, j; | 748 | unsigned long i, j; |
749 | u64 base_paddr; | 749 | resource_size_t base_paddr; |
750 | u64 paddr; | 750 | resource_size_t paddr; |
751 | unsigned long vma_start = vma->vm_start; | 751 | unsigned long vma_start = vma->vm_start; |
752 | unsigned long vma_end = vma->vm_end; | 752 | unsigned long vma_end = vma->vm_end; |
753 | unsigned long vma_size = vma_end - vma_start; | 753 | unsigned long vma_size = vma_end - vma_start; |
@@ -757,12 +757,12 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | |||
757 | 757 | ||
758 | if (is_linear_pfn_mapping(vma)) { | 758 | if (is_linear_pfn_mapping(vma)) { |
759 | /* reserve the whole chunk starting from vm_pgoff */ | 759 | /* reserve the whole chunk starting from vm_pgoff */ |
760 | paddr = (u64)vma->vm_pgoff << PAGE_SHIFT; | 760 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
761 | return reserve_pfn_range(paddr, vma_size, prot); | 761 | return reserve_pfn_range(paddr, vma_size, prot); |
762 | } | 762 | } |
763 | 763 | ||
764 | /* reserve page by page using pfn and size */ | 764 | /* reserve page by page using pfn and size */ |
765 | base_paddr = (u64)pfn << PAGE_SHIFT; | 765 | base_paddr = (resource_size_t)pfn << PAGE_SHIFT; |
766 | for (i = 0; i < size; i += PAGE_SIZE) { | 766 | for (i = 0; i < size; i += PAGE_SIZE) { |
767 | paddr = base_paddr + i; | 767 | paddr = base_paddr + i; |
768 | retval = reserve_pfn_range(paddr, PAGE_SIZE, prot); | 768 | retval = reserve_pfn_range(paddr, PAGE_SIZE, prot); |
@@ -790,7 +790,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | |||
790 | unsigned long size) | 790 | unsigned long size) |
791 | { | 791 | { |
792 | unsigned long i; | 792 | unsigned long i; |
793 | u64 paddr; | 793 | resource_size_t paddr; |
794 | unsigned long prot; | 794 | unsigned long prot; |
795 | unsigned long vma_start = vma->vm_start; | 795 | unsigned long vma_start = vma->vm_start; |
796 | unsigned long vma_end = vma->vm_end; | 796 | unsigned long vma_end = vma->vm_end; |
@@ -801,14 +801,14 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | |||
801 | 801 | ||
802 | if (is_linear_pfn_mapping(vma)) { | 802 | if (is_linear_pfn_mapping(vma)) { |
803 | /* free the whole chunk starting from vm_pgoff */ | 803 | /* free the whole chunk starting from vm_pgoff */ |
804 | paddr = (u64)vma->vm_pgoff << PAGE_SHIFT; | 804 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
805 | free_pfn_range(paddr, vma_size); | 805 | free_pfn_range(paddr, vma_size); |
806 | return; | 806 | return; |
807 | } | 807 | } |
808 | 808 | ||
809 | if (size != 0 && size != vma_size) { | 809 | if (size != 0 && size != vma_size) { |
810 | /* free page by page, using pfn and size */ | 810 | /* free page by page, using pfn and size */ |
811 | paddr = (u64)pfn << PAGE_SHIFT; | 811 | paddr = (resource_size_t)pfn << PAGE_SHIFT; |
812 | for (i = 0; i < size; i += PAGE_SIZE) { | 812 | for (i = 0; i < size; i += PAGE_SIZE) { |
813 | paddr = paddr + i; | 813 | paddr = paddr + i; |
814 | free_pfn_range(paddr, PAGE_SIZE); | 814 | free_pfn_range(paddr, PAGE_SIZE); |