Diffstat (limited to 'kernel/events/uprobes.c')
-rw-r--r--  kernel/events/uprobes.c  345
1 file changed, 162 insertions, 183 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 98256bc71ee1..5cc4e7e42e68 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -78,15 +78,23 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
  */
 static atomic_t uprobe_events = ATOMIC_INIT(0);
 
+/* Have a copy of original instruction */
+#define UPROBE_COPY_INSN        0
+/* Don't run handlers when first register / last unregister is in progress */
+#define UPROBE_RUN_HANDLER      1
+/* Can skip singlestep */
+#define UPROBE_SKIP_SSTEP       2
+
 struct uprobe {
         struct rb_node          rb_node;        /* node in the rb tree */
         atomic_t                ref;
         struct rw_semaphore     consumer_rwsem;
+        struct mutex            copy_mutex;     /* TODO: kill me and UPROBE_COPY_INSN */
         struct list_head        pending_list;
         struct uprobe_consumer  *consumers;
         struct inode            *inode;         /* Also hold a ref to inode */
         loff_t                  offset;
-        int                     flags;
+        unsigned long           flags;
         struct arch_uprobe      arch;
 };
 
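The flags field widens from int to unsigned long because the rest of the patch replaces open-coded `|=` / `&= ~` arithmetic with the kernel's set_bit()/clear_bit()/test_bit() bitops, which operate only on unsigned long bitmaps; set_bit() and clear_bit() are atomic read-modify-write operations, while the __set_bit() used in insert_uprobe() is the cheaper non-atomic variant. A minimal userspace analog of that API, built on compiler atomics rather than the kernel's arch-optimized implementations:

/* Userspace analog of the kernel bitop API (illustrative only). */
#include <stdio.h>
#include <stdbool.h>

#define UPROBE_COPY_INSN   0
#define UPROBE_RUN_HANDLER 1
#define UPROBE_SKIP_SSTEP  2

static void set_bit_ul(int nr, unsigned long *addr)
{
        __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void clear_bit_ul(int nr, unsigned long *addr)
{
        __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static bool test_bit_ul(int nr, const unsigned long *addr)
{
        return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
        unsigned long flags = 0;

        set_bit_ul(UPROBE_SKIP_SSTEP, &flags);
        printf("skip sstep: %d\n", test_bit_ul(UPROBE_SKIP_SSTEP, &flags)); /* 1 */
        clear_bit_ul(UPROBE_SKIP_SSTEP, &flags);
        printf("skip sstep: %d\n", test_bit_ul(UPROBE_SKIP_SSTEP, &flags)); /* 0 */
        return 0;
}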
@@ -100,17 +108,12 @@ struct uprobe {
  */
 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 {
-        if (!vma->vm_file)
-                return false;
-
-        if (!is_register)
-                return true;
+        vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
 
-        if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
-                                == (VM_READ|VM_EXEC))
-                return true;
+        if (is_register)
+                flags |= VM_WRITE;
 
-        return false;
+        return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
 }
 
 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
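The rewritten valid_vma() folds several tests into one mask comparison: the VMA must be file-backed, VM_MAYEXEC must be set, and VM_HUGETLB, VM_SHARED (plus VM_WRITE when registering) must all be clear. `(vm_flags & mask) == VM_MAYEXEC` expresses "this bit set, those bits clear" in a single compare. A standalone sketch of the same trick, with made-up flag values (the real VM_* constants differ):

/* Sketch of the single-compare mask test; flag values are invented. */
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE   0x1
#define VM_SHARED  0x2
#define VM_HUGETLB 0x4
#define VM_MAYEXEC 0x8

static bool valid_flags(unsigned long vm_flags, bool is_register)
{
        unsigned long mask = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;

        if (is_register)
                mask |= VM_WRITE;

        /* true iff VM_MAYEXEC is set and every other masked bit is clear */
        return (vm_flags & mask) == VM_MAYEXEC;
}

int main(void)
{
        printf("%d\n", valid_flags(VM_MAYEXEC, true));             /* 1 */
        printf("%d\n", valid_flags(VM_MAYEXEC | VM_WRITE, true));  /* 0 */
        printf("%d\n", valid_flags(VM_MAYEXEC | VM_WRITE, false)); /* 1 */
        return 0;
}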
@@ -193,19 +196,44 @@ bool __weak is_swbp_insn(uprobe_opcode_t *insn)
         return *insn == UPROBE_SWBP_INSN;
 }
 
+static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
+{
+        void *kaddr = kmap_atomic(page);
+        memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
+        kunmap_atomic(kaddr);
+}
+
+static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
+{
+        uprobe_opcode_t old_opcode;
+        bool is_swbp;
+
+        copy_opcode(page, vaddr, &old_opcode);
+        is_swbp = is_swbp_insn(&old_opcode);
+
+        if (is_swbp_insn(new_opcode)) {
+                if (is_swbp)            /* register: already installed? */
+                        return 0;
+        } else {
+                if (!is_swbp)           /* unregister: was it changed by us? */
+                        return 0;
+        }
+
+        return 1;
+}
+
 /*
  * NOTE:
  * Expect the breakpoint instruction to be the smallest size instruction for
  * the architecture. If an arch has variable length instruction and the
  * breakpoint instruction is not of the smallest length instruction
- * supported by that architecture then we need to modify read_opcode /
+ * supported by that architecture then we need to modify is_swbp_at_addr and
  * write_opcode accordingly. This would never be a problem for archs that
  * have fixed length instructions.
  */
 
 /*
  * write_opcode - write the opcode at a given virtual address.
- * @auprobe: arch breakpointing information.
  * @mm: the probed process address space.
  * @vaddr: the virtual address to store the opcode.
  * @opcode: opcode to be written at @vaddr.
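The new verify_opcode() gives write_opcode() an idempotence check: it returns 1 when the write should go ahead and 0 when the page is already in the desired state (breakpoint already installed on register; never installed or already gone on unregister), so the caller can treat ret <= 0 as "nothing to do". A compact userspace model of that contract, with a single byte standing in for the probed instruction (0xcc, the x86 int3 opcode, is an assumption of the demo):

/* Demo of the verify/write contract, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned char opcode_t;
#define SWBP_INSN 0xcc /* x86 int3; assumption for the demo */

static bool is_swbp(const opcode_t *op) { return *op == SWBP_INSN; }

/* 1: proceed with the write; 0: already in the desired state */
static int verify(const opcode_t *old_op, const opcode_t *new_op)
{
        if (is_swbp(new_op))
                return is_swbp(old_op) ? 0 : 1;   /* register */
        return is_swbp(old_op) ? 1 : 0;           /* unregister */
}

int main(void)
{
        opcode_t page_byte = 0x90 /* nop */, bp = SWBP_INSN;

        if (verify(&page_byte, &bp) > 0)
                page_byte = bp;                   /* install */
        printf("installed: %d\n", is_swbp(&page_byte));       /* 1 */
        printf("write needed again: %d\n", verify(&page_byte, &bp)); /* 0 */
        return 0;
}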
@@ -216,8 +244,8 @@ bool __weak is_swbp_insn(uprobe_opcode_t *insn)
  * For mm @mm, write the opcode at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
-                        unsigned long vaddr, uprobe_opcode_t opcode)
+static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
+                        uprobe_opcode_t opcode)
 {
         struct page *old_page, *new_page;
         void *vaddr_old, *vaddr_new;
@@ -226,10 +254,14 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 retry:
         /* Read the page with vaddr into memory */
-        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
+        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
         if (ret <= 0)
                 return ret;
 
+        ret = verify_opcode(old_page, vaddr, &opcode);
+        if (ret <= 0)
+                goto put_old;
+
         ret = -ENOMEM;
         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
         if (!new_page)
@@ -264,63 +296,6 @@ put_old:
 }
 
 /**
- * read_opcode - read the opcode at a given virtual address.
- * @mm: the probed process address space.
- * @vaddr: the virtual address to read the opcode.
- * @opcode: location to store the read opcode.
- *
- * Called with mm->mmap_sem held (for read and with a reference to
- * mm.
- *
- * For mm @mm, read the opcode at @vaddr and store it in @opcode.
- * Return 0 (success) or a negative errno.
- */
-static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
-{
-        struct page *page;
-        void *vaddr_new;
-        int ret;
-
-        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
-        if (ret <= 0)
-                return ret;
-
-        vaddr_new = kmap_atomic(page);
-        vaddr &= ~PAGE_MASK;
-        memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
-        kunmap_atomic(vaddr_new);
-
-        put_page(page);
-
-        return 0;
-}
-
-static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
-{
-        uprobe_opcode_t opcode;
-        int result;
-
-        if (current->mm == mm) {
-                pagefault_disable();
-                result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
-                                                                sizeof(opcode));
-                pagefault_enable();
-
-                if (likely(result == 0))
-                        goto out;
-        }
-
-        result = read_opcode(mm, vaddr, &opcode);
-        if (result)
-                return result;
-out:
-        if (is_swbp_insn(&opcode))
-                return 1;
-
-        return 0;
-}
-
-/**
  * set_swbp - store breakpoint at a given address.
  * @auprobe: arch specific probepoint information.
  * @mm: the probed process address space.
@@ -331,18 +306,7 @@ out:
  */
 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-        int result;
-        /*
-         * See the comment near uprobes_hash().
-         */
-        result = is_swbp_at_addr(mm, vaddr);
-        if (result == 1)
-                return 0;
-
-        if (result)
-                return result;
-
-        return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
+        return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }
 
 /**
@@ -357,16 +321,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
 int __weak
 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-        int result;
-
-        result = is_swbp_at_addr(mm, vaddr);
-        if (!result)
-                return -EINVAL;
-
-        if (result != 1)
-                return result;
-
-        return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+        return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -473,7 +428,7 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
         spin_unlock(&uprobes_treelock);
 
         /* For now assume that the instruction need not be single-stepped */
-        uprobe->flags |= UPROBE_SKIP_SSTEP;
+        __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
 
         return u;
 }
@@ -495,6 +450,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
         uprobe->inode = igrab(inode);
         uprobe->offset = offset;
         init_rwsem(&uprobe->consumer_rwsem);
+        mutex_init(&uprobe->copy_mutex);
 
         /* add to uprobes_tree, sorted on inode:offset */
         cur_uprobe = insert_uprobe(uprobe);
@@ -515,7 +471,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
 {
         struct uprobe_consumer *uc;
 
-        if (!(uprobe->flags & UPROBE_RUN_HANDLER))
+        if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
                 return;
 
         down_read(&uprobe->consumer_rwsem);
@@ -621,29 +577,43 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
         return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
-/*
- * How mm->uprobes_state.count gets updated
- * uprobe_mmap() increments the count if
- *      - it successfully adds a breakpoint.
- *      - it cannot add a breakpoint, but sees that there is a underlying
- *        breakpoint (via a is_swbp_at_addr()).
- *
- * uprobe_munmap() decrements the count if
- *      - it sees a underlying breakpoint, (via is_swbp_at_addr)
- *        (Subsequent uprobe_unregister wouldnt find the breakpoint
- *        unless a uprobe_mmap kicks in, since the old vma would be
- *        dropped just after uprobe_munmap.)
- *
- * uprobe_register increments the count if:
- *      - it successfully adds a breakpoint.
- *
- * uprobe_unregister decrements the count if:
- *      - it sees a underlying breakpoint and removes successfully.
- *        (via is_swbp_at_addr)
- *        (Subsequent uprobe_munmap wouldnt find the breakpoint
- *        since there is no underlying breakpoint after the
- *        breakpoint removal.)
- */
+static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
+                                struct mm_struct *mm, unsigned long vaddr)
+{
+        int ret = 0;
+
+        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
+                return ret;
+
+        mutex_lock(&uprobe->copy_mutex);
+        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
+                goto out;
+
+        ret = copy_insn(uprobe, file);
+        if (ret)
+                goto out;
+
+        ret = -ENOTSUPP;
+        if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
+                goto out;
+
+        ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
+        if (ret)
+                goto out;
+
+        /* write_opcode() assumes we don't cross page boundary */
+        BUG_ON((uprobe->offset & ~PAGE_MASK) +
+                        UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
+        smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
+        set_bit(UPROBE_COPY_INSN, &uprobe->flags);
+
+ out:
+        mutex_unlock(&uprobe->copy_mutex);
+
+        return ret;
+}
+
 static int
 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
                         struct vm_area_struct *vma, unsigned long vaddr)
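prepare_uprobe() is classic double-checked locking: a lockless test_bit() fast path, a re-check under copy_mutex, the one-time copy_insn()/analysis work, and then smp_wmb() before set_bit(UPROBE_COPY_INSN) so that anyone who observes the bit (paired with the smp_rmb() on the breakpoint-hit path) also observes the copied instruction. A userspace sketch of the same pattern with C11 atomics, where release/acquire ordering plays the role of the wmb()/rmb() pair:

/* Double-checked init sketch; names and the "insn" payload are stand-ins. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t copy_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool copy_done;
static int insn;                 /* stands in for uprobe->arch.insn */

static int prepare(void)
{
        if (atomic_load_explicit(&copy_done, memory_order_acquire))
                return 0;        /* fast path: already initialized */

        pthread_mutex_lock(&copy_mutex);
        if (!atomic_load_explicit(&copy_done, memory_order_relaxed)) {
                insn = 42;       /* the expensive copy/analysis step */
                /* release store pairs with the acquire load above */
                atomic_store_explicit(&copy_done, 1, memory_order_release);
        }
        pthread_mutex_unlock(&copy_mutex);
        return 0;
}

int main(void)
{
        prepare();
        prepare();               /* second call takes the fast path */
        printf("insn = %d\n", insn);
        return 0;
}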
@@ -661,24 +631,9 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
         if (!uprobe->consumers)
                 return 0;
 
-        if (!(uprobe->flags & UPROBE_COPY_INSN)) {
-                ret = copy_insn(uprobe, vma->vm_file);
-                if (ret)
-                        return ret;
-
-                if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
-                        return -ENOTSUPP;
-
-                ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
-                if (ret)
-                        return ret;
-
-                /* write_opcode() assumes we don't cross page boundary */
-                BUG_ON((uprobe->offset & ~PAGE_MASK) +
-                                UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-
-                uprobe->flags |= UPROBE_COPY_INSN;
-        }
+        ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
+        if (ret)
+                return ret;
 
         /*
          * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
@@ -697,15 +652,15 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
         return ret;
 }
 
-static void
+static int
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
         /* can happen if uprobe_register() fails */
         if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
-                return;
+                return 0;
 
         set_bit(MMF_RECALC_UPROBES, &mm->flags);
-        set_orig_insn(&uprobe->arch, mm, vaddr);
+        return set_orig_insn(&uprobe->arch, mm, vaddr);
 }
 
 /*
@@ -820,7 +775,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
                 struct mm_struct *mm = info->mm;
                 struct vm_area_struct *vma;
 
-                if (err)
+                if (err && is_register)
                         goto free;
 
                 down_write(&mm->mmap_sem);
@@ -836,7 +791,7 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
                 if (is_register)
                         err = install_breakpoint(uprobe, mm, vma, info->vaddr);
                 else
-                        remove_breakpoint(uprobe, mm, info->vaddr);
+                        err |= remove_breakpoint(uprobe, mm, info->vaddr);
 
  unlock:
                 up_write(&mm->mmap_sem);
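Together with remove_breakpoint() now returning int, the `err |= remove_breakpoint(...)` line (and the `err && is_register` test above it) changes the unregister policy: registration still stops rolling forward on the first failure, but unregistration keeps walking the remaining mms and merely records that some removal failed. Note that OR-ing negative errnos preserves only the fact of failure, not a specific code. A toy model of the two loop policies (fake per-mm outcomes, not kernel code):

/* Toy model: register stops on first error, unregister visits everything. */
#include <stdio.h>

static int results[] = { 0, -12, 0, -22 };   /* fake per-mm outcomes */
#define N (int)(sizeof(results) / sizeof(results[0]))

static int walk(int is_register)
{
        int i, err = 0;

        for (i = 0; i < N; i++) {
                if (err && is_register)
                        break;                /* register: stop, roll back */
                if (is_register)
                        err = results[i];
                else
                        err |= results[i];    /* unregister: keep going */
        }
        return err;
}

int main(void)
{
        printf("register:   %d\n", walk(1));  /* -12: first failure */
        printf("unregister: %d\n", walk(0));  /* nonzero: some failure */
        return 0;
}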
@@ -893,13 +848,15 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
         mutex_lock(uprobes_hash(inode));
         uprobe = alloc_uprobe(inode, offset);
 
-        if (uprobe && !consumer_add(uprobe, uc)) {
+        if (!uprobe) {
+                ret = -ENOMEM;
+        } else if (!consumer_add(uprobe, uc)) {
                 ret = __uprobe_register(uprobe);
                 if (ret) {
                         uprobe->consumers = NULL;
                         __uprobe_unregister(uprobe);
                 } else {
-                        uprobe->flags |= UPROBE_RUN_HANDLER;
+                        set_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
                 }
         }
 
@@ -932,7 +889,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
         if (consumer_del(uprobe, uc)) {
                 if (!uprobe->consumers) {
                         __uprobe_unregister(uprobe);
-                        uprobe->flags &= ~UPROBE_RUN_HANDLER;
+                        clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
                 }
         }
 
@@ -1393,10 +1350,11 @@ bool uprobe_deny_signal(void)
  */
 static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
 {
-        if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
-                return true;
-
-        uprobe->flags &= ~UPROBE_SKIP_SSTEP;
+        if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) {
+                if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
+                        return true;
+                clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
+        }
         return false;
 }
 
@@ -1419,6 +1377,30 @@ static void mmf_recalc_uprobes(struct mm_struct *mm)
         clear_bit(MMF_HAS_UPROBES, &mm->flags);
 }
 
+static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
+{
+        struct page *page;
+        uprobe_opcode_t opcode;
+        int result;
+
+        pagefault_disable();
+        result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
+                                                        sizeof(opcode));
+        pagefault_enable();
+
+        if (likely(result == 0))
+                goto out;
+
+        result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+        if (result < 0)
+                return result;
+
+        copy_opcode(page, vaddr, &opcode);
+        put_page(page);
+ out:
+        return is_swbp_insn(&opcode);
+}
+
 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 {
         struct mm_struct *mm = current->mm;
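The relocated is_swbp_at_addr() also changes shape: the old cross-mm read_opcode() fallback is gone, and the __copy_from_user_inatomic() fast path now runs unconditionally, which works because its caller here, find_active_uprobe(), operates on current->mm from the breakpoint-hit path. The return value is a tristate: negative errno, 0 (no breakpoint), or 1 (breakpoint present). A userspace sketch of the fast-path/slow-path shape, with stand-in read functions and 0xcc assumed as the breakpoint byte:

/* Fast-path/slow-path sketch; fast_read/slow_read are stand-ins. */
#include <stdio.h>

static int fast_read(unsigned char *dst) { (void)dst; return -1; /* pretend fault */ }
static int slow_read(unsigned char *dst) { *dst = 0xcc; return 0; }
static int is_swbp(unsigned char op)     { return op == 0xcc; }

/* < 0: error, 0: no breakpoint, 1: breakpoint present */
static int is_swbp_at(void)
{
        unsigned char op;
        int result;

        result = fast_read(&op);       /* no-fault attempt */
        if (result == 0)
                goto out;

        result = slow_read(&op);       /* fault the page in */
        if (result < 0)
                return result;
out:
        return is_swbp(op);
}

int main(void)
{
        printf("swbp: %d\n", is_swbp_at());  /* 1 via the slow path */
        return 0;
}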
@@ -1489,38 +1471,41 @@ static void handle_swbp(struct pt_regs *regs)
                 }
                 return;
         }
+        /*
+         * TODO: move copy_insn/etc into _register and remove this hack.
+         * After we hit the bp, _unregister + _register can install the
+         * new and not-yet-analyzed uprobe at the same address, restart.
+         */
+        smp_rmb(); /* pairs with wmb() in install_breakpoint() */
+        if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
+                goto restart;
 
         utask = current->utask;
         if (!utask) {
                 utask = add_utask();
                 /* Cannot allocate; re-execute the instruction. */
                 if (!utask)
-                        goto cleanup_ret;
+                        goto restart;
         }
-        utask->active_uprobe = uprobe;
+
         handler_chain(uprobe, regs);
-        if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
-                goto cleanup_ret;
+        if (can_skip_sstep(uprobe, regs))
+                goto out;
 
-        utask->state = UTASK_SSTEP;
         if (!pre_ssout(uprobe, regs, bp_vaddr)) {
                 arch_uprobe_enable_step(&uprobe->arch);
+                utask->active_uprobe = uprobe;
+                utask->state = UTASK_SSTEP;
                 return;
         }
 
-cleanup_ret:
-        if (utask) {
-                utask->active_uprobe = NULL;
-                utask->state = UTASK_RUNNING;
-        }
-        if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
-
-                /*
-                 * cannot singlestep; cannot skip instruction;
-                 * re-execute the instruction.
-                 */
-                instruction_pointer_set(regs, bp_vaddr);
-
+restart:
+        /*
+         * cannot singlestep; cannot skip instruction;
+         * re-execute the instruction.
+         */
+        instruction_pointer_set(regs, bp_vaddr);
+out:
         put_uprobe(uprobe);
 }
 
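handle_swbp() now has two distinct exits: `restart` rewinds the instruction pointer to bp_vaddr so the breakpoint refires (used when the uprobe is not yet analyzed or no utask can be allocated), while `out` leaves the IP alone because can_skip_sstep() already emulated the instruction. The old single cleanup_ret label conflated the two and also had to undo utask state that is now set only after pre_ssout() succeeds. A control-flow sketch with stand-in predicates:

/* Control-flow sketch of the two exits (stand-in predicates). */
#include <stdio.h>

static int copied(void)    { return 0; }  /* not yet analyzed: restart */
static int can_skip(void)  { return 0; }
static int pre_ssout(void) { return -1; } /* pretend sstep setup fails */

static void handle_swbp(unsigned long *ip, unsigned long bp_vaddr)
{
        if (!copied())
                goto restart;

        if (can_skip())
                goto out;               /* emulated: IP already fixed up */

        if (!pre_ssout())
                return;                 /* single-stepping: keep IP as-is */

restart:
        *ip = bp_vaddr;                 /* re-execute, trap again */
out:
        ;                               /* drop the uprobe ref here */
}

int main(void)
{
        unsigned long ip = 0x1004, bp = 0x1000;

        handle_swbp(&ip, bp);
        printf("ip = %#lx\n", ip);      /* rewound to 0x1000 */
        return 0;
}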
@@ -1552,13 +1537,12 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
 }
 
 /*
- * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag. (and on
- * subsequent probe hits on the thread sets the state to UTASK_BP_HIT) and
- * allows the thread to return from interrupt.
+ * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
+ * allows the thread to return from interrupt. After that handle_swbp()
+ * sets utask->active_uprobe.
  *
- * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
- * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
- * interrupt.
+ * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
+ * and allows the thread to return from interrupt.
  *
  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
  * uprobe_notify_resume().
@@ -1567,11 +1551,13 @@ void uprobe_notify_resume(struct pt_regs *regs)
 {
         struct uprobe_task *utask;
 
+        clear_thread_flag(TIF_UPROBE);
+
         utask = current->utask;
-        if (!utask || utask->state == UTASK_BP_HIT)
-                handle_swbp(regs);
-        else
+        if (utask && utask->active_uprobe)
                 handle_singlestep(utask, regs);
+        else
+                handle_swbp(regs);
 }
 
 /*
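With UTASK_BP_HIT gone, the dispatch keys off data instead of a state flag: a thread arriving here with a non-NULL utask->active_uprobe must be completing a single-step (active_uprobe is only set once pre_ssout() succeeds), and everything else is a fresh breakpoint hit, so handle_swbp() becomes the default branch and clearing TIF_UPROBE moves to the one place both paths share. A sketch of the dispatch, with hypothetical stand-in types:

/* Dispatch sketch (hypothetical types): presence of active_uprobe,
 * not an explicit state flag, selects the single-step path. */
#include <stdio.h>

struct utask { void *active_uprobe; };

static void handle_singlestep(struct utask *t) { (void)t; puts("finish sstep"); }
static void handle_swbp(void)                  { puts("handle breakpoint"); }

static void notify_resume(struct utask *utask)
{
        /* TIF_UPROBE would be cleared here, once, for both paths */
        if (utask && utask->active_uprobe)
                handle_singlestep(utask);
        else
                handle_swbp();
}

int main(void)
{
        struct utask t = { 0 };

        notify_resume(NULL);   /* no utask yet: breakpoint path */
        notify_resume(&t);     /* no active uprobe: breakpoint path */
        t.active_uprobe = &t;
        notify_resume(&t);     /* completing a single-step */
        return 0;
}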
@@ -1580,17 +1566,10 @@ void uprobe_notify_resume(struct pt_regs *regs)
  */
 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
 {
-        struct uprobe_task *utask;
-
         if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
                 return 0;
 
-        utask = current->utask;
-        if (utask)
-                utask->state = UTASK_BP_HIT;
-
         set_thread_flag(TIF_UPROBE);
-
         return 1;
 }
 