diff options
author | venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com> | 2008-12-19 16:47:28 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2008-12-19 18:40:30 -0500 |
commit | 982d789ab76c8a11426852fec2fdf2f412e21c0c (patch) | |
tree | 41e6932764facecb11bc9ca831ffd67ded384d68 /mm | |
parent | d87fe6607c31944f7572f965c1507ae77026c133 (diff) |
x86: PAT: remove follow_pfnmap_pte in favor of follow_phys
Impact: Cleanup - removes a new function in favor of a recently modified older one.
Replace follow_pfnmap_pte in pat code with follow_phys. follow_phys also
returns the protection, eliminating the need for the pte_pgprot call. Using
follow_phys also eliminates the need for pte_pa.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 43 |
1 file changed, 0 insertions, 43 deletions
diff --git a/mm/memory.c b/mm/memory.c index 79f28e35d4fc..6b29f39a5a3e 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1168,49 +1168,6 @@ no_page_table: | |||
1168 | return page; | 1168 | return page; |
1169 | } | 1169 | } |
1170 | 1170 | ||
1171 | int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address, | ||
1172 | pte_t *ret_ptep) | ||
1173 | { | ||
1174 | pgd_t *pgd; | ||
1175 | pud_t *pud; | ||
1176 | pmd_t *pmd; | ||
1177 | pte_t *ptep, pte; | ||
1178 | spinlock_t *ptl; | ||
1179 | struct page *page; | ||
1180 | struct mm_struct *mm = vma->vm_mm; | ||
1181 | |||
1182 | if (!is_pfn_mapping(vma)) | ||
1183 | goto err; | ||
1184 | |||
1185 | page = NULL; | ||
1186 | pgd = pgd_offset(mm, address); | ||
1187 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
1188 | goto err; | ||
1189 | |||
1190 | pud = pud_offset(pgd, address); | ||
1191 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) | ||
1192 | goto err; | ||
1193 | |||
1194 | pmd = pmd_offset(pud, address); | ||
1195 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
1196 | goto err; | ||
1197 | |||
1198 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); | ||
1199 | |||
1200 | pte = *ptep; | ||
1201 | if (!pte_present(pte)) | ||
1202 | goto err_unlock; | ||
1203 | |||
1204 | *ret_ptep = pte; | ||
1205 | pte_unmap_unlock(ptep, ptl); | ||
1206 | return 0; | ||
1207 | |||
1208 | err_unlock: | ||
1209 | pte_unmap_unlock(ptep, ptl); | ||
1210 | err: | ||
1211 | return -EINVAL; | ||
1212 | } | ||
1213 | |||
1214 | /* Can we do the FOLL_ANON optimization? */ | 1171 | /* Can we do the FOLL_ANON optimization? */ |
1215 | static inline int use_zero_page(struct vm_area_struct *vma) | 1172 | static inline int use_zero_page(struct vm_area_struct *vma) |
1216 | { | 1173 | { |