author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-11 20:20:12 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-11 20:20:12 -0500
commit     39cf275a1a18ba3c7eb9b986c5c9b35b57332798 (patch)
tree       40b119ca9d2fbaf8128d3fa25f4c64669002b0c0 /mm/mprotect.c
parent     ad5d69899e52792671c1aa6c7360464c7edfe09c (diff)
parent     e5137b50a0640009fd63a3e65c14bc6e1be8796a (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The main changes in this cycle are:

   - (much) improved CONFIG_NUMA_BALANCING support from Mel Gorman, Rik
     van Riel, Peter Zijlstra et al.  Yay!

   - optimize preemption counter handling: merge the NEED_RESCHED flag
     into the preempt_count variable, by Peter Zijlstra.

   - wait.h fixes and code reorganization from Peter Zijlstra

   - cfs_bandwidth fixes from Ben Segall

   - SMP load-balancer cleanups from Peter Zijlstra

   - idle balancer improvements from Jason Low

   - other fixes and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (129 commits)
  ftrace, sched: Add TRACE_FLAG_PREEMPT_RESCHED
  stop_machine: Fix race between stop_two_cpus() and stop_cpus()
  sched: Remove unnecessary iteration over sched domains to update nr_busy_cpus
  sched: Fix asymmetric scheduling for POWER7
  sched: Move completion code from core.c to completion.c
  sched: Move wait code from core.c to wait.c
  sched: Move wait.c into kernel/sched/
  sched/wait: Fix __wait_event_interruptible_lock_irq_timeout()
  sched: Avoid throttle_cfs_rq() racing with period_timer stopping
  sched: Guarantee new group-entities always have weight
  sched: Fix hrtimer_cancel()/rq->lock deadlock
  sched: Fix cfs_bandwidth misuse of hrtimer_expires_remaining
  sched: Fix race on toggling cfs_bandwidth_used
  sched: Remove extra put_online_cpus() inside sched_setaffinity()
  sched/rt: Fix task_tick_rt() comment
  sched/wait: Fix build breakage
  sched/wait: Introduce prepare_to_wait_event()
  sched/wait: Add ___wait_cond_timeout() to wait_event*_timeout() too
  sched: Remove get_online_cpus() usage
  sched: Fix race in migrate_swap_stop()
  ...
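Of the items above, the preempt_count change is the one whose mechanics are least obvious from the log alone. The toy program below is only a conceptual sketch of that idea, not kernel code: every demo_* name and the DEMO_NEED_RESCHED constant are invented here, and it merely mimics the inverted-flag trick (as used on x86) so that "preemption re-enabled and a reschedule is pending" collapses into a single compare against zero.

#include <stdio.h>

/*
 * Low bits count nested "preempt disable" sections; the top bit holds the
 * resched flag *inverted* (bit set = no reschedule pending), which is the
 * trick that lets one decrement-and-test cover both conditions.
 */
#define DEMO_NEED_RESCHED 0x80000000u

static unsigned int demo_preempt_count = DEMO_NEED_RESCHED;

static void demo_set_need_resched(void)   { demo_preempt_count &= ~DEMO_NEED_RESCHED; }
static void demo_clear_need_resched(void) { demo_preempt_count |= DEMO_NEED_RESCHED; }
static void demo_preempt_disable(void)    { demo_preempt_count++; }

static void demo_preempt_enable(void)
{
	/* "count dropped to zero AND resched pending" is now a single test */
	if (--demo_preempt_count == 0)
		printf("would call schedule() here\n");
}

int main(void)
{
	demo_preempt_disable();
	demo_set_need_resched();   /* a wakeup arrives while preemption is off */
	demo_preempt_enable();     /* one compare triggers the deferred resched */
	demo_clear_need_resched();
	return 0;
}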
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c  |  65
1 file changed, 19 insertions, 46 deletions
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 412ba2b7326a..a597f2ffcd6f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,12 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
+		int dirty_accountable, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_node = true;
-	int last_nid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	arch_enter_lazy_mmu_mode();
@@ -63,15 +61,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page) {
-				int this_nid = page_to_nid(page);
-				if (last_nid == -1)
-					last_nid = this_nid;
-				if (last_nid != this_nid)
-					all_same_node = false;
-
-				/* only check non-shared pages */
-				if (!pte_numa(oldpte) &&
-				    page_mapcount(page) == 1) {
+				if (!pte_numa(oldpte)) {
 					ptent = pte_mknuma(ptent);
 					updated = true;
 				}
@@ -104,33 +94,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				if (pte_swp_soft_dirty(oldpte))
 					newpte = pte_swp_mksoft_dirty(newpte);
 				set_pte_at(mm, addr, pte, newpte);
+
+				pages++;
 			}
-			pages++;
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_node = all_same_node;
 	return pages;
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
-{
-	spin_lock(&mm->page_table_lock);
-	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
-	spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
-{
-	BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pud_t *pud, unsigned long addr, unsigned long end,
 		pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -138,34 +112,33 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_node;
 
 	pmd = pmd_offset(pud, addr);
 	do {
+		unsigned long this_pages;
+
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				split_huge_page_pmd(vma, addr, pmd);
-			else if (change_huge_pmd(vma, pmd, addr, newprot,
-						 prot_numa)) {
-				pages++;
-				continue;
+			else {
+				int nr_ptes = change_huge_pmd(vma, pmd, addr,
+						newprot, prot_numa);
+
+				if (nr_ptes) {
+					if (nr_ptes == HPAGE_PMD_NR)
+						pages++;
+
+					continue;
+				}
 			}
 			/* fall through */
 		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		pages += change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_node);
-
-		/*
-		 * If we are changing protections for NUMA hinting faults then
-		 * set pmd_numa if the examined pages were all on the same
-		 * node. This allows a regular PMD to be handled as one fault
-		 * and effectively batches the taking of the PTL
-		 */
-		if (prot_numa && all_same_node)
-			change_pmd_protnuma(vma->vm_mm, addr, pmd);
+		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
+				 dirty_accountable, prot_numa);
+		pages += this_pages;
 	} while (pmd++, addr = next, addr != end);
 
 	return pages;
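For reference, the accounting change in the change_pmd_range() hunk above can be summarized with a stand-alone sketch. This is not kernel code: the demo_* names are invented, DEMO_HPAGE_PMD_NR only stands in for HPAGE_PMD_NR, and the stub models just the two outcomes visible in the hunk (helper did nothing vs. helper updated the whole huge PMD); in the real function a non-zero return short of HPAGE_PMD_NR still skips the PTE walk without bumping the counter.

#include <stdio.h>

#define DEMO_HPAGE_PMD_NR 512	/* base pages per huge PMD on x86-64 (2MB / 4KB) */

/*
 * Stand-in for change_huge_pmd(): 0 means "not handled, fall through to the
 * PTE walk"; DEMO_HPAGE_PMD_NR means "the whole huge PMD was updated".
 */
static int demo_change_huge_pmd(int updated)
{
	return updated ? DEMO_HPAGE_PMD_NR : 0;
}

int main(void)
{
	unsigned long pages = 0;

	for (int pmd = 0; pmd < 4; pmd++) {
		int nr_ptes = demo_change_huge_pmd(pmd % 2);

		if (nr_ptes) {
			if (nr_ptes == DEMO_HPAGE_PMD_NR)
				pages++;	/* count the huge PMD once */
			continue;		/* nothing left for a PTE walk */
		}
		/* the real code would fall through to change_pte_range() here */
	}

	printf("pages counted: %lu\n", pages);	/* prints 2 */
	return 0;
}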