aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2016-08-17 11:37:04 -0400
committerIngo Molnar <mingo@kernel.org>2016-08-18 04:03:50 -0400
commitbdfaa2eecd5f6ca0cb5cff2bc7a974a15a2fd21b (patch)
tree80cf1c16c2fde249dcf335b65e8e60069b2bc131 /kernel/events
parentbc06f00dbd71a839228f382540a834c3963b9312 (diff)
uprobes: Rename the "struct page *" args of __replace_page()
Purely cosmetic, no changes in the compiled code. Perhaps it is just me but I can hardly read __replace_page() because I can't distinguish "page" from "kpage" and because I need to look at the caller to ensure that, say, kpage is really the new page and the code is correct. Rename them to old_page and new_page, this matches the caller. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Brenden Blanco <bblanco@plumgrid.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vladimir Davydov <vdavydov@virtuozzo.com> Link: http://lkml.kernel.org/r/20160817153704.GC29724@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/uprobes.c36
1 file changed, 18 insertions, 18 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 8c50276b60d1..d4129bb05e5d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -150,7 +150,7 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
150 * Returns 0 on success, -EFAULT on failure. 150 * Returns 0 on success, -EFAULT on failure.
151 */ 151 */
152static int __replace_page(struct vm_area_struct *vma, unsigned long addr, 152static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
153 struct page *page, struct page *kpage) 153 struct page *old_page, struct page *new_page)
154{ 154{
155 struct mm_struct *mm = vma->vm_mm; 155 struct mm_struct *mm = vma->vm_mm;
156 spinlock_t *ptl; 156 spinlock_t *ptl;
@@ -161,49 +161,49 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
161 const unsigned long mmun_end = addr + PAGE_SIZE; 161 const unsigned long mmun_end = addr + PAGE_SIZE;
162 struct mem_cgroup *memcg; 162 struct mem_cgroup *memcg;
163 163
164 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg, 164 err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
165 false); 165 false);
166 if (err) 166 if (err)
167 return err; 167 return err;
168 168
169 /* For try_to_free_swap() and munlock_vma_page() below */ 169 /* For try_to_free_swap() and munlock_vma_page() below */
170 lock_page(page); 170 lock_page(old_page);
171 171
172 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 172 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
173 err = -EAGAIN; 173 err = -EAGAIN;
174 ptep = page_check_address(page, mm, addr, &ptl, 0); 174 ptep = page_check_address(old_page, mm, addr, &ptl, 0);
175 if (!ptep) { 175 if (!ptep) {
176 mem_cgroup_cancel_charge(kpage, memcg, false); 176 mem_cgroup_cancel_charge(new_page, memcg, false);
177 goto unlock; 177 goto unlock;
178 } 178 }
179 179
180 get_page(kpage); 180 get_page(new_page);
181 page_add_new_anon_rmap(kpage, vma, addr, false); 181 page_add_new_anon_rmap(new_page, vma, addr, false);
182 mem_cgroup_commit_charge(kpage, memcg, false, false); 182 mem_cgroup_commit_charge(new_page, memcg, false, false);
183 lru_cache_add_active_or_unevictable(kpage, vma); 183 lru_cache_add_active_or_unevictable(new_page, vma);
184 184
185 if (!PageAnon(page)) { 185 if (!PageAnon(old_page)) {
186 dec_mm_counter(mm, mm_counter_file(page)); 186 dec_mm_counter(mm, mm_counter_file(old_page));
187 inc_mm_counter(mm, MM_ANONPAGES); 187 inc_mm_counter(mm, MM_ANONPAGES);
188 } 188 }
189 189
190 flush_cache_page(vma, addr, pte_pfn(*ptep)); 190 flush_cache_page(vma, addr, pte_pfn(*ptep));
191 ptep_clear_flush_notify(vma, addr, ptep); 191 ptep_clear_flush_notify(vma, addr, ptep);
192 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); 192 set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
193 193
194 page_remove_rmap(page, false); 194 page_remove_rmap(old_page, false);
195 if (!page_mapped(page)) 195 if (!page_mapped(old_page))
196 try_to_free_swap(page); 196 try_to_free_swap(old_page);
197 pte_unmap_unlock(ptep, ptl); 197 pte_unmap_unlock(ptep, ptl);
198 198
199 if (vma->vm_flags & VM_LOCKED) 199 if (vma->vm_flags & VM_LOCKED)
200 munlock_vma_page(page); 200 munlock_vma_page(old_page);
201 put_page(page); 201 put_page(old_page);
202 202
203 err = 0; 203 err = 0;
204 unlock: 204 unlock:
205 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 205 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
206 unlock_page(page); 206 unlock_page(old_page);
207 return err; 207 return err;
208} 208}
209 209