diff options
| author | venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com> | 2008-12-19 16:47:29 -0500 |
|---|---|---|
| committer | H. Peter Anvin <hpa@zytor.com> | 2008-12-19 18:40:30 -0500 |
| commit | 34801ba9bf0381fcf0e2b08179d2c07f2c6ede74 (patch) | |
| tree | e14d979cb9abf9220d0c60b04505851805975398 | |
| parent | 982d789ab76c8a11426852fec2fdf2f412e21c0c (diff) | |
x86: PAT: move track untrack pfnmap stubs to asm-generic
Impact: Cleanup and branch hints only.
Move the track and untrack pfn stub routines from memory.c to asm-generic.
Also add unlikely() branch hints to the pfnmap-related checks in the fork and exit paths.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
| -rw-r--r-- | arch/x86/include/asm/pgtable.h | 6 | ||||
| -rw-r--r-- | include/asm-generic/pgtable.h | 46 | ||||
| -rw-r--r-- | include/linux/mm.h | 6 | ||||
| -rw-r--r-- | mm/memory.c | 48 |
4 files changed, 50 insertions, 56 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 2aa792bbd7e0..875192bf72cb 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
| @@ -339,12 +339,10 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
| 339 | 339 | ||
| 340 | #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) | 340 | #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) |
| 341 | 341 | ||
| 342 | #ifndef __ASSEMBLY__ | ||
| 342 | /* Indicate that x86 has its own track and untrack pfn vma functions */ | 343 | /* Indicate that x86 has its own track and untrack pfn vma functions */ |
| 343 | #define track_pfn_vma_new track_pfn_vma_new | 344 | #define __HAVE_PFNMAP_TRACKING |
| 344 | #define track_pfn_vma_copy track_pfn_vma_copy | ||
| 345 | #define untrack_pfn_vma untrack_pfn_vma | ||
| 346 | 345 | ||
| 347 | #ifndef __ASSEMBLY__ | ||
| 348 | #define __HAVE_PHYS_MEM_ACCESS_PROT | 346 | #define __HAVE_PHYS_MEM_ACCESS_PROT |
| 349 | struct file; | 347 | struct file; |
| 350 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 348 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index b84633801fb6..72ebe91005a8 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
| @@ -293,6 +293,52 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, | |||
| 293 | #define arch_flush_lazy_cpu_mode() do {} while (0) | 293 | #define arch_flush_lazy_cpu_mode() do {} while (0) |
| 294 | #endif | 294 | #endif |
| 295 | 295 | ||
| 296 | #ifndef __HAVE_PFNMAP_TRACKING | ||
| 297 | /* | ||
| 298 | * Interface that can be used by architecture code to keep track of | ||
| 299 | * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) | ||
| 300 | * | ||
| 301 | * track_pfn_vma_new is called when a _new_ pfn mapping is being established | ||
| 302 | * for physical range indicated by pfn and size. | ||
| 303 | */ | ||
| 304 | static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | ||
| 305 | unsigned long pfn, unsigned long size) | ||
| 306 | { | ||
| 307 | return 0; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* | ||
| 311 | * Interface that can be used by architecture code to keep track of | ||
| 312 | * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) | ||
| 313 | * | ||
| 314 | * track_pfn_vma_copy is called when vma that is covering the pfnmap gets | ||
| 315 | * copied through copy_page_range(). | ||
| 316 | */ | ||
| 317 | static inline int track_pfn_vma_copy(struct vm_area_struct *vma) | ||
| 318 | { | ||
| 319 | return 0; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* | ||
| 323 | * Interface that can be used by architecture code to keep track of | ||
| 324 | * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) | ||
| 325 | * | ||
| 326 | * untrack_pfn_vma is called while unmapping a pfnmap for a region. | ||
| 327 | * untrack can be called for a specific region indicated by pfn and size or | ||
| 328 | * can be for the entire vma (in which case size can be zero). | ||
| 329 | */ | ||
| 330 | static inline void untrack_pfn_vma(struct vm_area_struct *vma, | ||
| 331 | unsigned long pfn, unsigned long size) | ||
| 332 | { | ||
| 333 | } | ||
| 334 | #else | ||
| 335 | extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | ||
| 336 | unsigned long pfn, unsigned long size); | ||
| 337 | extern int track_pfn_vma_copy(struct vm_area_struct *vma); | ||
| 338 | extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | ||
| 339 | unsigned long size); | ||
| 340 | #endif | ||
| 341 | |||
| 296 | #endif /* !__ASSEMBLY__ */ | 342 | #endif /* !__ASSEMBLY__ */ |
| 297 | 343 | ||
| 298 | #endif /* _ASM_GENERIC_PGTABLE_H */ | 344 | #endif /* _ASM_GENERIC_PGTABLE_H */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 36f9b3fa5e15..d3ddd735e375 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -163,12 +163,6 @@ static inline int is_pfn_mapping(struct vm_area_struct *vma) | |||
| 163 | return (vma->vm_flags & VM_PFNMAP); | 163 | return (vma->vm_flags & VM_PFNMAP); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | ||
| 167 | unsigned long pfn, unsigned long size); | ||
| 168 | extern int track_pfn_vma_copy(struct vm_area_struct *vma); | ||
| 169 | extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | ||
| 170 | unsigned long size); | ||
| 171 | |||
| 172 | /* | 166 | /* |
| 173 | * vm_fault is filled by the the pagefault handler and passed to the vma's | 167 | * vm_fault is filled by the the pagefault handler and passed to the vma's |
| 174 | * ->fault function. The vma's ->fault is responsible for returning a bitmask | 168 | * ->fault function. The vma's ->fault is responsible for returning a bitmask |
diff --git a/mm/memory.c b/mm/memory.c index 6b29f39a5a3e..f01b7eed6e16 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -99,50 +99,6 @@ int randomize_va_space __read_mostly = | |||
| 99 | 2; | 99 | 2; |
| 100 | #endif | 100 | #endif |
| 101 | 101 | ||
| 102 | #ifndef track_pfn_vma_new | ||
| 103 | /* | ||
| 104 | * Interface that can be used by architecture code to keep track of | ||
| 105 | * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) | ||
| 106 | * | ||
| 107 | * track_pfn_vma_new is called when a _new_ pfn mapping is being established | ||
| 108 | * for physical range indicated by pfn and size. | ||
| 109 | */ | ||
| 110 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, | ||
| 111 | unsigned long pfn, unsigned long size) | ||
| 112 | { | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | #endif | ||
| 116 | |||
| 117 | #ifndef track_pfn_vma_copy | ||
| 118 | /* | ||
| 119 | * Interface that can be used by architecture code to keep track of | ||
| 120 | * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) | ||
| 121 | * | ||
| 122 | * track_pfn_vma_copy is called when vma that is covering the pfnmap gets | ||
| 123 | * copied through copy_page_range(). | ||
| 124 | */ | ||
| 125 | int track_pfn_vma_copy(struct vm_area_struct *vma) | ||
| 126 | { | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | #endif | ||
| 130 | |||
| 131 | #ifndef untrack_pfn_vma | ||
| 132 | /* | ||
| 133 | * Interface that can be used by architecture code to keep track of | ||
| 134 | * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn) | ||
| 135 | * | ||
| 136 | * untrack_pfn_vma is called while unmapping a pfnmap for a region. | ||
| 137 | * untrack can be called for a specific region indicated by pfn and size or | ||
| 138 | * can be for the entire vma (in which case size can be zero). | ||
| 139 | */ | ||
| 140 | void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | ||
| 141 | unsigned long size) | ||
| 142 | { | ||
| 143 | } | ||
| 144 | #endif | ||
| 145 | |||
| 146 | static int __init disable_randmaps(char *s) | 102 | static int __init disable_randmaps(char *s) |
| 147 | { | 103 | { |
| 148 | randomize_va_space = 0; | 104 | randomize_va_space = 0; |
| @@ -713,7 +669,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 713 | if (is_vm_hugetlb_page(vma)) | 669 | if (is_vm_hugetlb_page(vma)) |
| 714 | return copy_hugetlb_page_range(dst_mm, src_mm, vma); | 670 | return copy_hugetlb_page_range(dst_mm, src_mm, vma); |
| 715 | 671 | ||
| 716 | if (is_pfn_mapping(vma)) { | 672 | if (unlikely(is_pfn_mapping(vma))) { |
| 717 | /* | 673 | /* |
| 718 | * We do not free on error cases below as remove_vma | 674 | * We do not free on error cases below as remove_vma |
| 719 | * gets called on error from higher level routine | 675 | * gets called on error from higher level routine |
| @@ -969,7 +925,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp, | |||
| 969 | if (vma->vm_flags & VM_ACCOUNT) | 925 | if (vma->vm_flags & VM_ACCOUNT) |
| 970 | *nr_accounted += (end - start) >> PAGE_SHIFT; | 926 | *nr_accounted += (end - start) >> PAGE_SHIFT; |
| 971 | 927 | ||
| 972 | if (is_pfn_mapping(vma)) | 928 | if (unlikely(is_pfn_mapping(vma))) |
| 973 | untrack_pfn_vma(vma, 0, 0); | 929 | untrack_pfn_vma(vma, 0, 0); |
| 974 | 930 | ||
| 975 | while (start != end) { | 931 | while (start != end) { |
