aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorvenkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>2008-12-18 14:41:28 -0500
committerH. Peter Anvin <hpa@zytor.com>2008-12-18 16:30:15 -0500
commite121e418441525b5636321fe03d16f0193ad218e (patch)
treed9f54fef9c4f137c2e8327edbf3ba8110dfb968d /mm
parent3c8bb73ace6249bd089b70c941440441940e3365 (diff)
x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3
Impact: New currently unused interface. Add a generic interface to follow pfn in a pfnmap vma range. This is used by one of the subsequent x86 PAT related patch to keep track of memory types for vma regions across vma copy and free. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c43
1 file changed, 43 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c
index cef95c8c77fa..8ca6bbf34ad6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1111,6 +1111,49 @@ no_page_table:
1111 return page; 1111 return page;
1112} 1112}
1113 1113
1114int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
1115 pte_t *ret_ptep)
1116{
1117 pgd_t *pgd;
1118 pud_t *pud;
1119 pmd_t *pmd;
1120 pte_t *ptep, pte;
1121 spinlock_t *ptl;
1122 struct page *page;
1123 struct mm_struct *mm = vma->vm_mm;
1124
1125 if (!is_pfn_mapping(vma))
1126 goto err;
1127
1128 page = NULL;
1129 pgd = pgd_offset(mm, address);
1130 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1131 goto err;
1132
1133 pud = pud_offset(pgd, address);
1134 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
1135 goto err;
1136
1137 pmd = pmd_offset(pud, address);
1138 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
1139 goto err;
1140
1141 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
1142
1143 pte = *ptep;
1144 if (!pte_present(pte))
1145 goto err_unlock;
1146
1147 *ret_ptep = pte;
1148 pte_unmap_unlock(ptep, ptl);
1149 return 0;
1150
1151err_unlock:
1152 pte_unmap_unlock(ptep, ptl);
1153err:
1154 return -EINVAL;
1155}
1156
1114/* Can we do the FOLL_ANON optimization? */ 1157/* Can we do the FOLL_ANON optimization? */
1115static inline int use_zero_page(struct vm_area_struct *vma) 1158static inline int use_zero_page(struct vm_area_struct *vma)
1116{ 1159{