aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorvenkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>2008-12-19 16:47:29 -0500
committerH. Peter Anvin <hpa@zytor.com>2008-12-19 18:40:30 -0500
commit34801ba9bf0381fcf0e2b08179d2c07f2c6ede74 (patch)
treee14d979cb9abf9220d0c60b04505851805975398 /mm
parent982d789ab76c8a11426852fec2fdf2f412e21c0c (diff)
x86: PAT: move track untrack pfnmap stubs to asm-generic
Impact: Cleanup and branch hints only. Move the track and untrack pfn stub routines from memory.c to asm-generic. Also add unlikely to pfnmap related calls in fork and exit path. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c48
1 file changed, 2 insertions(+), 46 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6b29f39a5a3e..f01b7eed6e16 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -99,50 +99,6 @@ int randomize_va_space __read_mostly =
 	2;
 #endif
 
-#ifndef track_pfn_vma_new
-/*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * track_pfn_vma_new is called when a _new_ pfn mapping is being established
- * for physical range indicated by pfn and size.
- */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
-			unsigned long pfn, unsigned long size)
-{
-	return 0;
-}
-#endif
-
-#ifndef track_pfn_vma_copy
-/*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
- */
-int track_pfn_vma_copy(struct vm_area_struct *vma)
-{
-	return 0;
-}
-#endif
-
-#ifndef untrack_pfn_vma
-/*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * untrack_pfn_vma is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case size can be zero).
- */
-void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
-			unsigned long size)
-{
-}
-#endif
-
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
@@ -713,7 +669,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
-	if (is_pfn_mapping(vma)) {
+	if (unlikely(is_pfn_mapping(vma))) {
 		/*
 		 * We do not free on error cases below as remove_vma
 		 * gets called on error from higher level routine
@@ -969,7 +925,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
-		if (is_pfn_mapping(vma))
+		if (unlikely(is_pfn_mapping(vma)))
 			untrack_pfn_vma(vma, 0, 0);
 
 		while (start != end) {