author     Oleg Nesterov <oleg@redhat.com>    2012-05-29 15:30:08 -0400
committer  Ingo Molnar <mingo@kernel.org>     2012-06-06 11:22:22 -0400
commit     778b032d96909690c19d84f8d17c13be65ed6f8e (patch)
tree       2f788c3843ca416969aeceda155c79994467bded /kernel
parent     56bb4cf6475d702d2fb00fc641aa6441097c0330 (diff)
uprobes: Kill uprobes_srcu/uprobe_srcu_id
Kill the no longer needed uprobes_srcu/uprobe_srcu_id code.

It doesn't really work anyway. synchronize_srcu() can only
synchronize with the code "inside" the
srcu_read_lock/srcu_read_unlock section, while
uprobe_pre_sstep_notifier() does srcu_read_lock() _after_ we
have already hit the breakpoint.

I guess this probably works "in practice": synchronize_srcu()
is slow and it implies synchronize_sched(), and the probed task
enters the non-preemptible section at the start of the
exception handler. Still, this is not right at least in theory,
and task->uprobe_srcu_id bloats task_struct.
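
For reference, a minimal kernel-style sketch of the SRCU contract the
message relies on (the "example_srcu" domain and the reader/writer
functions below are illustrative only, not code from this patch):

    #include <linux/srcu.h>

    static struct srcu_struct example_srcu;  /* init_srcu_struct() at init time */

    void reader(void)
    {
            int idx;

            idx = srcu_read_lock(&example_srcu);
            /* Only this region is covered by a concurrent synchronize_srcu(). */
            srcu_read_unlock(&example_srcu, idx);
    }

    void writer(void)
    {
            /* ... unlink the object readers might still see ... */
            synchronize_srcu(&example_srcu);
            /*
             * This waits only for readers that have already called
             * srcu_read_lock(). A task that hit the breakpoint but has
             * not yet reached uprobe_pre_sstep_notifier()'s
             * srcu_read_lock() is not waited for -- the hole described
             * above.
             */
    }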
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anton Arapov <anton@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120529193008.GG8057@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/events/uprobes.c | 22 +++-------------------
1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1f02e3bbfc1d..8c5e043cd309 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -38,7 +38,6 @@
 #define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
 #define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE
 
-static struct srcu_struct uprobes_srcu;
 static struct rb_root uprobes_tree = RB_ROOT;
 
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
@@ -738,20 +737,14 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
 }
 
 /*
- * There could be threads that have hit the breakpoint and are entering the
- * notifier code and trying to acquire the uprobes_treelock. The thread
- * calling delete_uprobe() that is removing the uprobe from the rb_tree can
- * race with these threads and might acquire the uprobes_treelock compared
- * to some of the breakpoint hit threads. In such a case, the breakpoint
- * hit threads will not find the uprobe. The current unregistering thread
- * waits till all other threads have hit a breakpoint, to acquire the
- * uprobes_treelock before the uprobe is removed from the rbtree.
+ * There could be threads that have already hit the breakpoint. They
+ * will recheck the current insn and restart if find_uprobe() fails.
+ * See find_active_uprobe().
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
 	unsigned long flags;
 
-	synchronize_srcu(&uprobes_srcu);
 	spin_lock_irqsave(&uprobes_treelock, flags);
 	rb_erase(&uprobe->rb_node, &uprobes_tree);
 	spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -1388,9 +1381,6 @@ void uprobe_free_utask(struct task_struct *t)
 {
 	struct uprobe_task *utask = t->utask;
 
-	if (t->uprobe_srcu_id != -1)
-		srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
-
 	if (!utask)
 		return;
 
@@ -1408,7 +1398,6 @@ void uprobe_free_utask(struct task_struct *t)
 void uprobe_copy_process(struct task_struct *t)
 {
 	t->utask = NULL;
-	t->uprobe_srcu_id = -1;
 }
 
 /*
@@ -1513,9 +1502,6 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	} else {
 		*is_swbp = -EFAULT;
 	}
-
-	srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
-	current->uprobe_srcu_id = -1;
 	up_read(&mm->mmap_sem);
 
 	return uprobe;
@@ -1656,7 +1642,6 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
 	utask->state = UTASK_BP_HIT;
 
 	set_thread_flag(TIF_UPROBE);
-	current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
 
 	return 1;
 }
@@ -1691,7 +1676,6 @@ static int __init init_uprobes(void)
 		mutex_init(&uprobes_mutex[i]);
 		mutex_init(&uprobes_mmap_mutex[i]);
 	}
-	init_srcu_struct(&uprobes_srcu);
 
 	return register_die_notifier(&uprobe_exception_nb);
 }
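
The replacement comment in delete_uprobe() relies on the breakpoint-hit
path rechecking the instruction. In outline, the fallback behaves
roughly as follows (a simplified sketch paraphrasing the handle_swbp()
code of this era, not part of the diff above):

    struct uprobe *uprobe;
    int is_swbp;

    uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
    if (!uprobe) {
            if (is_swbp > 0) {
                    /* A genuine breakpoint with no uprobe attached: report it. */
                    send_sig(SIGTRAP, current, 0);
            } else {
                    /*
                     * We raced with uprobe_unregister(): the original
                     * instruction has been restored, so rewind and
                     * re-execute it.
                     */
                    instruction_pointer_set(regs, bp_vaddr);
            }
            return;
    }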