diff options
author | venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com> | 2008-12-18 14:41:28 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2008-12-18 16:30:15 -0500 |
commit | e121e418441525b5636321fe03d16f0193ad218e (patch) | |
tree | d9f54fef9c4f137c2e8327edbf3ba8110dfb968d | |
parent | 3c8bb73ace6249bd089b70c941440441940e3365 (diff) |
x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3
Impact: New currently unused interface.
Add a generic interface to follow pfn in a pfnmap vma range. This is used by
one of the subsequent x86 PAT related patch to keep track of memory types
for vma regions across vma copy and free.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r-- | include/linux/mm.h | 3 | ||||
-rw-r--r-- | mm/memory.c | 43 |
2 files changed, 46 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 2be8d9b5e46f..a25024ff9c11 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1223,6 +1223,9 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address, | |||
1223 | #define FOLL_GET 0x04 /* do get_page on page */ | 1223 | #define FOLL_GET 0x04 /* do get_page on page */ |
1224 | #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */ | 1224 | #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */ |
1225 | 1225 | ||
1226 | int follow_pfnmap_pte(struct vm_area_struct *vma, | ||
1227 | unsigned long address, pte_t *ret_ptep); | ||
1228 | |||
1226 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | 1229 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, |
1227 | void *data); | 1230 | void *data); |
1228 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, | 1231 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, |
diff --git a/mm/memory.c b/mm/memory.c index cef95c8c77fa..8ca6bbf34ad6 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1111,6 +1111,49 @@ no_page_table: | |||
1111 | return page; | 1111 | return page; |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address, | ||
1115 | pte_t *ret_ptep) | ||
1116 | { | ||
1117 | pgd_t *pgd; | ||
1118 | pud_t *pud; | ||
1119 | pmd_t *pmd; | ||
1120 | pte_t *ptep, pte; | ||
1121 | spinlock_t *ptl; | ||
1122 | struct page *page; | ||
1123 | struct mm_struct *mm = vma->vm_mm; | ||
1124 | |||
1125 | if (!is_pfn_mapping(vma)) | ||
1126 | goto err; | ||
1127 | |||
1128 | page = NULL; | ||
1129 | pgd = pgd_offset(mm, address); | ||
1130 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
1131 | goto err; | ||
1132 | |||
1133 | pud = pud_offset(pgd, address); | ||
1134 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) | ||
1135 | goto err; | ||
1136 | |||
1137 | pmd = pmd_offset(pud, address); | ||
1138 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
1139 | goto err; | ||
1140 | |||
1141 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); | ||
1142 | |||
1143 | pte = *ptep; | ||
1144 | if (!pte_present(pte)) | ||
1145 | goto err_unlock; | ||
1146 | |||
1147 | *ret_ptep = pte; | ||
1148 | pte_unmap_unlock(ptep, ptl); | ||
1149 | return 0; | ||
1150 | |||
1151 | err_unlock: | ||
1152 | pte_unmap_unlock(ptep, ptl); | ||
1153 | err: | ||
1154 | return -EINVAL; | ||
1155 | } | ||
1156 | |||
1114 | /* Can we do the FOLL_ANON optimization? */ | 1157 | /* Can we do the FOLL_ANON optimization? */ |
1115 | static inline int use_zero_page(struct vm_area_struct *vma) | 1158 | static inline int use_zero_page(struct vm_area_struct *vma) |
1116 | { | 1159 | { |