author    Ingo Molnar <mingo@kernel.org>  2012-09-19 11:03:07 -0400
committer Ingo Molnar <mingo@kernel.org>  2012-09-19 11:03:07 -0400
commit    d0616c1775035496fb355248d296fb16ea7fb235 (patch)
tree      7a6cbefa1ba8ed3fd1e03d3267b196d074c47279 /kernel
parent    bea8f35421628266658c14ea990d18b0969c4c0b (diff)
parent    baedbf02b1912225d60dd7403acb4b4e003088b5 (diff)
Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core
Pull uprobes fixes + cleanups from Oleg Nesterov.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/uprobes.c | 99
1 file changed, 79 insertions(+), 20 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1666632e6edf..912ef48d28ab 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -411,11 +411,10 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 {
 	struct uprobe *uprobe;
-	unsigned long flags;
 
-	spin_lock_irqsave(&uprobes_treelock, flags);
+	spin_lock(&uprobes_treelock);
 	uprobe = __find_uprobe(inode, offset);
-	spin_unlock_irqrestore(&uprobes_treelock, flags);
+	spin_unlock(&uprobes_treelock);
 
 	return uprobe;
 }
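The same conversion repeats for every uprobes_treelock user in this file: the tree is only ever touched from process context, so the _irqsave/_irqrestore variants, and the flags locals they require, buy nothing. A minimal sketch of the underlying rule, using a hypothetical example_lock rather than anything from this patch:

	/* A lock that is never taken from IRQ context does not need to
	 * disable interrupts around its critical section. */
	static DEFINE_SPINLOCK(example_lock);	/* hypothetical */

	static void process_context_only(void)
	{
		spin_lock(&example_lock);	/* plain lock: no IRQ-context users */
		/* ... touch the shared structure ... */
		spin_unlock(&example_lock);
	}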
@@ -462,12 +461,11 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
  */
 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
 {
-	unsigned long flags;
 	struct uprobe *u;
 
-	spin_lock_irqsave(&uprobes_treelock, flags);
+	spin_lock(&uprobes_treelock);
 	u = __insert_uprobe(uprobe);
-	spin_unlock_irqrestore(&uprobes_treelock, flags);
+	spin_unlock(&uprobes_treelock);
 
 	/* For now assume that the instruction need not be single-stepped */
 	uprobe->flags |= UPROBE_SKIP_SSTEP;
@@ -686,7 +684,9 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 	set_bit(MMF_HAS_UPROBES, &mm->flags);
 
 	ret = set_swbp(&uprobe->arch, mm, vaddr);
-	if (ret && first_uprobe)
+	if (!ret)
+		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
+	else if (first_uprobe)
 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 
 	return ret;
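install_breakpoint() now sets MMF_HAS_UPROBES optimistically before arming the breakpoint, clears MMF_RECALC_UPROBES once a breakpoint is known to be live, and rolls the hint back only if the very first install fails. The consumer side of this pairing appears in find_active_uprobe() further down; a hypothetical condensation of that consumer, for orientation (recalc_if_stale() is an invented name, the flag logic is from the patch):

	/* illustrative only: how the breakpoint handler consumes the hint */
	static void recalc_if_stale(struct mm_struct *mm, struct uprobe *uprobe)
	{
		/* no uprobe matched and the hint was marked stale: recount,
		 * possibly clearing MMF_HAS_UPROBES for future fast rejects */
		if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
			mmf_recalc_uprobes(mm);
	}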
@@ -695,6 +695,11 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 static void
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
+	/* can happen if uprobe_register() fails */
+	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
+		return;
+
+	set_bit(MMF_RECALC_UPROBES, &mm->flags);
 	set_orig_insn(&uprobe->arch, mm, vaddr);
 }
 
@@ -705,11 +710,9 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&uprobes_treelock, flags);
+	spin_lock(&uprobes_treelock);
 	rb_erase(&uprobe->rb_node, &uprobes_tree);
-	spin_unlock_irqrestore(&uprobes_treelock, flags);
+	spin_unlock(&uprobes_treelock);
 	iput(uprobe->inode);
 	put_uprobe(uprobe);
 	atomic_dec(&uprobe_events);
@@ -897,7 +900,8 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 	}
 
 	mutex_unlock(uprobes_hash(inode));
-	put_uprobe(uprobe);
+	if (uprobe)
+		put_uprobe(uprobe);
 
 	return ret;
 }
@@ -967,7 +971,6 @@ static void build_probe_list(struct inode *inode,
 			     struct list_head *head)
 {
 	loff_t min, max;
-	unsigned long flags;
 	struct rb_node *n, *t;
 	struct uprobe *u;
 
@@ -975,7 +978,7 @@ static void build_probe_list(struct inode *inode,
 	min = vaddr_to_offset(vma, start);
 	max = min + (end - start) - 1;
 
-	spin_lock_irqsave(&uprobes_treelock, flags);
+	spin_lock(&uprobes_treelock);
 	n = find_node_in_range(inode, min, max);
 	if (n) {
 		for (t = n; t; t = rb_prev(t)) {
@@ -993,7 +996,7 @@ static void build_probe_list(struct inode *inode,
 			atomic_inc(&u->ref);
 		}
 	}
-	spin_unlock_irqrestore(&uprobes_treelock, flags);
+	spin_unlock(&uprobes_treelock);
 }
 
 /*
@@ -1030,6 +1033,25 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	return 0;
 }
 
+static bool
+vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	loff_t min, max;
+	struct inode *inode;
+	struct rb_node *n;
+
+	inode = vma->vm_file->f_mapping->host;
+
+	min = vaddr_to_offset(vma, start);
+	max = min + (end - start) - 1;
+
+	spin_lock(&uprobes_treelock);
+	n = find_node_in_range(inode, min, max);
+	spin_unlock(&uprobes_treelock);
+
+	return !!n;
+}
+
 /*
  * Called in context of a munmap of a vma.
  */
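vma_has_uprobes() answers "does any registered probe land inside [start, end) of this file-backed vma?" with a single range query against the global rbtree, after translating the virtual range into file offsets. vaddr_to_offset() already exists in uprobes.c; this reimplementation is purely illustrative of the translation it performs:

	/* illustrative only: map a user virtual address to its offset in
	 * the backing file, via the vma's page offset */
	static loff_t example_vaddr_to_offset(struct vm_area_struct *vma,
					      unsigned long vaddr)
	{
		return ((loff_t)vma->vm_pgoff << PAGE_SHIFT)
			+ (vaddr - vma->vm_start);
	}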
@@ -1041,10 +1063,12 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
 		return;
 
-	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
+	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
+	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
 		return;
 
-	/* TODO: unmapping uprobe(s) will need more work */
+	if (vma_has_uprobes(vma, start, end))
+		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
 }
 
 /* Slot allocation for XOL */
@@ -1150,8 +1174,11 @@ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 {
 	newmm->uprobes_state.xol_area = NULL;
 
-	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags))
+	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
+		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
+		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
+	}
 }
 
 /*
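The comment is the key detail: dup_mmap() does not copy VM_DONTCOPY vmas, so a child can legitimately end up with fewer probed mappings than its parent, and the hint must be recomputed rather than inherited verbatim. A small userspace illustration of how such a vma arises (assumed setup, not from the patch; uprobes only attach to file-backed mappings, an anonymous one is used here merely to show the flag):

	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4096;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		madvise(p, len, MADV_DONTFORK);	/* sets VM_DONTCOPY on the vma */
		if (fork() == 0)
			_exit(0);	/* child: the mapping at p is absent */
		return 0;
	}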
@@ -1369,6 +1396,25 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
 	return false;
 }
 
+static void mmf_recalc_uprobes(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (!valid_vma(vma, false))
+			continue;
+		/*
+		 * This is not strictly accurate, we can race with
+		 * uprobe_unregister() and see the already removed
+		 * uprobe if delete_uprobe() was not yet called.
+		 */
+		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
+			return;
+	}
+
+	clear_bit(MMF_HAS_UPROBES, &mm->flags);
+}
+
 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 {
 	struct mm_struct *mm = current->mm;
@@ -1390,11 +1436,24 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	} else {
 		*is_swbp = -EFAULT;
 	}
+
+	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
+		mmf_recalc_uprobes(mm);
 	up_read(&mm->mmap_sem);
 
 	return uprobe;
 }
 
+void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
+{
+	user_enable_single_step(current);
+}
+
+void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
+{
+	user_disable_single_step(current);
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
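arch_uprobe_enable_step()/arch_uprobe_disable_step() are deliberately __weak: the generic versions just toggle hardware single-stepping, and an architecture can substitute its own stepping machinery. A hypothetical override, only to show the shape (auprobe_can_emulate_step() is an invented helper, not a real kernel symbol):

	/* hypothetical arch override; not from any in-tree architecture */
	void arch_uprobe_enable_step(struct arch_uprobe *auprobe)
	{
		if (auprobe_can_emulate_step(auprobe))	/* invented helper */
			return;		/* arch steps this insn itself */
		user_enable_single_step(current);
	}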
@@ -1441,7 +1500,7 @@ static void handle_swbp(struct pt_regs *regs)
 
 	utask->state = UTASK_SSTEP;
 	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
-		user_enable_single_step(current);
+		arch_uprobe_enable_step(&uprobe->arch);
 		return;
 	}
 
@@ -1477,10 +1536,10 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
 	else
 		WARN_ON_ONCE(1);
 
+	arch_uprobe_disable_step(&uprobe->arch);
 	put_uprobe(uprobe);
 	utask->active_uprobe = NULL;
 	utask->state = UTASK_RUNNING;
-	user_disable_single_step(current);
 	xol_free_insn_slot(current);
 
 	spin_lock_irq(&current->sighand->siglock);
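Taken together, the last two hunks give the single-step path a symmetric pair of arch hooks; an illustrative trace of the resulting lifecycle, reconstructed from the hunks above:

	/*
	 * Illustrative control flow after this merge:
	 *
	 *   handle_swbp()
	 *     pre_ssout()                set up the XOL slot
	 *     arch_uprobe_enable_step()  was user_enable_single_step()
	 *   ...the stepped instruction traps...
	 *   handle_singlestep()
	 *     arch_uprobe_disable_step() was user_disable_single_step(),
	 *                                now called before put_uprobe()
	 *     xol_free_insn_slot()
	 */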