diff options
author | venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com> | 2008-12-19 16:47:27 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2008-12-19 18:40:30 -0500 |
commit | d87fe6607c31944f7572f965c1507ae77026c133 (patch) | |
tree | 56e41312b33ca3fe2bca50252f436e5f19e256a8 /mm/memory.c | |
parent | 6bd9cd50c830eb88d571c492ec370a30bf999e15 (diff) |
x86: PAT: modify follow_phys to return phys_addr prot and return value
Impact: Changes and globalizes an existing static interface.
Follow_phys does similar things to follow_pfnmap_pte. Make a minor change
to follow_phys so that it can be used in place of follow_pfnmap_pte.
Returning the physical address directly, with 0 as the error value, does not
work in follow_phys, because a pte may legitimately map the actual physical address 0.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 31 |
1 files changed, 14 insertions, 17 deletions
diff --git a/mm/memory.c b/mm/memory.c index 1e8f0d347c0e..79f28e35d4fc 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2981,9 +2981,9 @@ int in_gate_area_no_task(unsigned long addr) | |||
2981 | #endif /* __HAVE_ARCH_GATE_AREA */ | 2981 | #endif /* __HAVE_ARCH_GATE_AREA */ |
2982 | 2982 | ||
2983 | #ifdef CONFIG_HAVE_IOREMAP_PROT | 2983 | #ifdef CONFIG_HAVE_IOREMAP_PROT |
2984 | static resource_size_t follow_phys(struct vm_area_struct *vma, | 2984 | int follow_phys(struct vm_area_struct *vma, |
2985 | unsigned long address, unsigned int flags, | 2985 | unsigned long address, unsigned int flags, |
2986 | unsigned long *prot) | 2986 | unsigned long *prot, resource_size_t *phys) |
2987 | { | 2987 | { |
2988 | pgd_t *pgd; | 2988 | pgd_t *pgd; |
2989 | pud_t *pud; | 2989 | pud_t *pud; |
@@ -2992,24 +2992,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma, | |||
2992 | spinlock_t *ptl; | 2992 | spinlock_t *ptl; |
2993 | resource_size_t phys_addr = 0; | 2993 | resource_size_t phys_addr = 0; |
2994 | struct mm_struct *mm = vma->vm_mm; | 2994 | struct mm_struct *mm = vma->vm_mm; |
2995 | int ret = -EINVAL; | ||
2995 | 2996 | ||
2996 | VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP))); | 2997 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) |
2998 | goto out; | ||
2997 | 2999 | ||
2998 | pgd = pgd_offset(mm, address); | 3000 | pgd = pgd_offset(mm, address); |
2999 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | 3001 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
3000 | goto no_page_table; | 3002 | goto out; |
3001 | 3003 | ||
3002 | pud = pud_offset(pgd, address); | 3004 | pud = pud_offset(pgd, address); |
3003 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) | 3005 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) |
3004 | goto no_page_table; | 3006 | goto out; |
3005 | 3007 | ||
3006 | pmd = pmd_offset(pud, address); | 3008 | pmd = pmd_offset(pud, address); |
3007 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | 3009 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) |
3008 | goto no_page_table; | 3010 | goto out; |
3009 | 3011 | ||
3010 | /* We cannot handle huge page PFN maps. Luckily they don't exist. */ | 3012 | /* We cannot handle huge page PFN maps. Luckily they don't exist. */ |
3011 | if (pmd_huge(*pmd)) | 3013 | if (pmd_huge(*pmd)) |
3012 | goto no_page_table; | 3014 | goto out; |
3013 | 3015 | ||
3014 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); | 3016 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); |
3015 | if (!ptep) | 3017 | if (!ptep) |
@@ -3024,13 +3026,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma, | |||
3024 | phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ | 3026 | phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ |
3025 | 3027 | ||
3026 | *prot = pgprot_val(pte_pgprot(pte)); | 3028 | *prot = pgprot_val(pte_pgprot(pte)); |
3029 | *phys = phys_addr; | ||
3030 | ret = 0; | ||
3027 | 3031 | ||
3028 | unlock: | 3032 | unlock: |
3029 | pte_unmap_unlock(ptep, ptl); | 3033 | pte_unmap_unlock(ptep, ptl); |
3030 | out: | 3034 | out: |
3031 | return phys_addr; | 3035 | return ret; |
3032 | no_page_table: | ||
3033 | return 0; | ||
3034 | } | 3036 | } |
3035 | 3037 | ||
3036 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, | 3038 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, |
@@ -3041,12 +3043,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, | |||
3041 | void *maddr; | 3043 | void *maddr; |
3042 | int offset = addr & (PAGE_SIZE-1); | 3044 | int offset = addr & (PAGE_SIZE-1); |
3043 | 3045 | ||
3044 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) | 3046 | if (follow_phys(vma, addr, write, &prot, &phys_addr)) |
3045 | return -EINVAL; | ||
3046 | |||
3047 | phys_addr = follow_phys(vma, addr, write, &prot); | ||
3048 | |||
3049 | if (!phys_addr) | ||
3050 | return -EINVAL; | 3047 | return -EINVAL; |
3051 | 3048 | ||
3052 | maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); | 3049 | maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); |