author	Peter Zijlstra <peterz@infradead.org>	2013-10-07 06:29:20 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-10-09 08:47:45 -0400
commit	90572890d202527c366aa9489b32404e88a7c020 (patch)
tree	0577f3b043e312f6d53e50105b236514f7df2455 /mm/mprotect.c
parent	e1dda8a797b59d7ec4b17e393152ec3273a552d5 (diff)
mm: numa: Change page last {nid,pid} into {cpu,pid}
Change the per page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to try and look up the alternate task more
easily. Note that even though it is the cpu that is stored in the page
flags, the mpol_misplaced decision is still based on the node.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
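For context, a cpupid is simply the two values packed into a single int so
they fit in the spare page-flag bits. The standalone sketch below illustrates
that encoding, assuming an 8-bit pid slice (in the spirit of the kernel's
LAST__PID_SHIFT convention); the helper names here are made up for
illustration, and the real masks and shifts live in include/linux/mm.h and
vary with NR_CPUS and kernel configuration.

#include <stdio.h>

/*
 * Hypothetical stand-ins for the kernel's cpupid encoding: pack a
 * (cpu, pid) pair into one int, keeping only the low 8 bits of the
 * pid. The real, config-dependent layout is in include/linux/mm.h;
 * this is an assumption-laden sketch, not the kernel API.
 */
#define SKETCH_PID_SHIFT	8
#define SKETCH_PID_MASK		((1 << SKETCH_PID_SHIFT) - 1)

static int sketch_make_cpupid(int cpu, int pid)
{
	/* Truncating the pid means distinct tasks can collide. */
	return (cpu << SKETCH_PID_SHIFT) | (pid & SKETCH_PID_MASK);
}

static int sketch_cpupid_to_cpu(int cpupid)
{
	return cpupid >> SKETCH_PID_SHIFT;
}

static int sketch_cpupid_to_pid(int cpupid)
{
	return cpupid & SKETCH_PID_MASK;
}

int main(void)
{
	int cpupid = sketch_make_cpupid(3, 1234);

	printf("cpu=%d pid(low bits)=%d\n",
	       sketch_cpupid_to_cpu(cpupid), sketch_cpupid_to_pid(cpupid));
	return 0;
}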
Diffstat (limited to 'mm/mprotect.c')
 mm/mprotect.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5aae39017d6d..9a74855f1241 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,14 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
+		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_nidpid = true;
-	int last_nid = -1;
+	bool all_same_cpupid = true;
+	int last_cpu = -1;
 	int last_pid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -64,17 +64,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page) {
-				int nidpid = page_nidpid_last(page);
-				int this_nid = nidpid_to_nid(nidpid);
-				int this_pid = nidpid_to_pid(nidpid);
+				int cpupid = page_cpupid_last(page);
+				int this_cpu = cpupid_to_cpu(cpupid);
+				int this_pid = cpupid_to_pid(cpupid);
 
-				if (last_nid == -1)
-					last_nid = this_nid;
+				if (last_cpu == -1)
+					last_cpu = this_cpu;
 				if (last_pid == -1)
 					last_pid = this_pid;
-				if (last_nid != this_nid ||
+				if (last_cpu != this_cpu ||
 				    last_pid != this_pid) {
-					all_same_nidpid = false;
+					all_same_cpupid = false;
 				}
 
 				if (!pte_numa(oldpte)) {
@@ -115,7 +115,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_nidpid = all_same_nidpid;
+	*ret_all_same_cpupid = all_same_cpupid;
 	return pages;
 }
 
@@ -142,7 +142,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_nidpid;
+	bool all_same_cpupid;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -168,7 +168,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_nidpid);
+				 dirty_accountable, prot_numa, &all_same_cpupid);
 		pages += this_pages;
 
 		/*
@@ -177,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && this_pages && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_cpupid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
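The prot_numa path above batches a whole PMD's worth of PTEs into a single
hinting fault only when every page in the range was last touched by the same
cpu,pid pair. A minimal userspace sketch of that same-owner scan, using
hypothetical names rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative sketch of the all_same_cpupid scan in change_pte_range:
 * walk a range of (cpu, pid) records and report whether one task on one
 * cpu touched them all. struct fault_rec and these names are invented
 * for this example.
 */
struct fault_rec {
	int cpu;
	int pid;
};

static bool all_same_cpupid(const struct fault_rec *recs, int n)
{
	int last_cpu = -1, last_pid = -1;
	bool same = true;
	int i;

	for (i = 0; i < n; i++) {
		if (last_cpu == -1)
			last_cpu = recs[i].cpu;
		if (last_pid == -1)
			last_pid = recs[i].pid;
		if (last_cpu != recs[i].cpu || last_pid != recs[i].pid)
			same = false;
	}
	return same;
}

int main(void)
{
	struct fault_rec recs[] = { {2, 100}, {2, 100}, {3, 100} };

	/* Mixed cpus: this range would not be batched as one fault. */
	printf("batchable: %d\n", all_same_cpupid(recs, 3));
	return 0;
}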