Diffstat (limited to 'kernel/events/uprobes.c')
-rw-r--r--  kernel/events/uprobes.c  81
1 file changed, 62 insertions(+), 19 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 84fa00497c49..94d38a39d72e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -26,6 +26,7 @@
 #include <linux/percpu-rwsem.h>
 #include <linux/task_work.h>
 #include <linux/shmem_fs.h>
+#include <linux/khugepaged.h>
 
 #include <linux/uprobes.h>
 
@@ -143,17 +144,19 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
  *
  * @vma:      vma that holds the pte pointing to page
  * @addr:     address the old @page is mapped at
- * @page:     the cowed page we are replacing by kpage
- * @kpage:    the modified page we replace page by
+ * @old_page: the page we are replacing by new_page
+ * @new_page: the modified page we replace page by
  *
- * Returns 0 on success, -EFAULT on failure.
+ * If @new_page is NULL, only unmap @old_page.
+ *
+ * Returns 0 on success, negative error code otherwise.
  */
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 			  struct page *old_page, struct page *new_page)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page_vma_mapped_walk pvmw = {
-		.page = old_page,
+		.page = compound_head(old_page),
 		.vma = vma,
 		.address = addr,
 	};
@@ -164,12 +167,12 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 				addr + PAGE_SIZE);
 
-	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
-
-	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
-			false);
-	if (err)
-		return err;
+	if (new_page) {
+		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+					    &memcg, false);
+		if (err)
+			return err;
+	}
 
 	/* For try_to_free_swap() and munlock_vma_page() below */
 	lock_page(old_page);
@@ -177,15 +180,20 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_invalidate_range_start(&range);
 	err = -EAGAIN;
 	if (!page_vma_mapped_walk(&pvmw)) {
-		mem_cgroup_cancel_charge(new_page, memcg, false);
+		if (new_page)
+			mem_cgroup_cancel_charge(new_page, memcg, false);
 		goto unlock;
 	}
 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
-	get_page(new_page);
-	page_add_new_anon_rmap(new_page, vma, addr, false);
-	mem_cgroup_commit_charge(new_page, memcg, false, false);
-	lru_cache_add_active_or_unevictable(new_page, vma);
+	if (new_page) {
+		get_page(new_page);
+		page_add_new_anon_rmap(new_page, vma, addr, false);
+		mem_cgroup_commit_charge(new_page, memcg, false, false);
+		lru_cache_add_active_or_unevictable(new_page, vma);
+	} else
+		/* no new page, just dec_mm_counter for old_page */
+		dec_mm_counter(mm, MM_ANONPAGES);
 
 	if (!PageAnon(old_page)) {
 		dec_mm_counter(mm, mm_counter_file(old_page));
@@ -194,8 +202,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
 	ptep_clear_flush_notify(vma, addr, pvmw.pte);
-	set_pte_at_notify(mm, addr, pvmw.pte,
-			mk_pte(new_page, vma->vm_page_prot));
+	if (new_page)
+		set_pte_at_notify(mm, addr, pvmw.pte,
+				  mk_pte(new_page, vma->vm_page_prot));
 
 	page_remove_rmap(old_page, false);
 	if (!page_mapped(old_page))
@@ -464,6 +473,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	struct page *old_page, *new_page;
 	struct vm_area_struct *vma;
 	int ret, is_register, ref_ctr_updated = 0;
+	bool orig_page_huge = false;
 
 	is_register = is_swbp_insn(&opcode);
 	uprobe = container_of(auprobe, struct uprobe, arch);
@@ -471,7 +481,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 retry:
 	/* Read the page with vaddr into memory */
 	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
-			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
+			FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
 	if (ret <= 0)
 		return ret;
 
@@ -488,6 +498,10 @@ retry:
 		ref_ctr_updated = 1;
 	}
 
+	ret = 0;
+	if (!is_register && !PageAnon(old_page))
+		goto put_old;
+
 	ret = anon_vma_prepare(vma);
 	if (ret)
 		goto put_old;
@@ -501,8 +515,33 @@ retry:
 	copy_highpage(new_page, old_page);
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
+	if (!is_register) {
+		struct page *orig_page;
+		pgoff_t index;
+
+		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
+
+		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
+		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
+					  index);
+
+		if (orig_page) {
+			if (PageUptodate(orig_page) &&
+			    pages_identical(new_page, orig_page)) {
+				/* let go new_page */
+				put_page(new_page);
+				new_page = NULL;
+
+				if (PageCompound(orig_page))
+					orig_page_huge = true;
+			}
+			put_page(orig_page);
+		}
+	}
+
 	ret = __replace_page(vma, vaddr, old_page, new_page);
-	put_page(new_page);
+	if (new_page)
+		put_page(new_page);
 put_old:
 	put_page(old_page);
 
@@ -513,6 +552,10 @@ put_old:
 	if (ret && is_register && ref_ctr_updated)
 		update_ref_ctr(uprobe, mm, -1);
 
+	/* try collapse pmd for compound page */
+	if (!ret && orig_page_huge)
+		collapse_pte_mapped_thp(mm, vaddr);
+
 	return ret;
 }
 
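Note on the technique: the unregister path above only swaps the original file-backed page back in when the modified copy, with the breakpoint erased, matches that page byte for byte, via the pages_identical() call in the diff (provided by the KSM code). Below is a minimal conceptual sketch of that comparison, not the kernel's actual implementation; the helper name pages_identical_sketch and the exact include set are assumptions made for illustration.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative sketch only: compare two pages byte for byte, which is
 * the semantics the uprobe restore path relies on via pages_identical().
 */
static bool pages_identical_sketch(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	bool identical;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	identical = memcmp(addr1, addr2, PAGE_SIZE) == 0;
	/* unmap in reverse order of the kmap_atomic() calls */
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);

	return identical;
}

Restoring the original page, rather than keeping a COWed copy with the breakpoint removed, is what makes the final collapse_pte_mapped_thp() call useful: once every PTE in the range maps the original compound page again, the huge PMD mapping can be re-established.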