path: root/kernel
author	Oleg Nesterov <oleg@redhat.com>	2012-08-19 11:41:34 -0400
committer	Oleg Nesterov <oleg@redhat.com>	2012-09-15 11:37:27 -0400
commit	499a4f3ec057a0f79636cc3c1e581bb6e977a30f (patch)
tree	6c24cbd3d9eeb3d1711da4b5aa114e6ff17955c9 /kernel
parent	9f68f672c47b9bd4cfe0a667ecb0b1382c61e2de (diff)
uprobes: Teach find_active_uprobe() to clear MMF_HAS_UPROBES
A wrongly set MMF_HAS_UPROBES doesn't really hurt; it just triggers the "slow" and unnecessary handle_swbp() path if the task hits a non-uprobe breakpoint.

So this patch changes find_active_uprobe() to check every valid vma and clear MMF_HAS_UPROBES if no uprobes are found. This adds a slow O(n) path, but it is only taken in the unlikely case when the task hits a normal breakpoint for the first time after uprobe_unregister().

Note the "not strictly accurate" comment in mmf_recalc_uprobes(). We could fix this; we only need to teach vma_has_uprobes() to return a bit more info, but I am not sure it is worth the trouble.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
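For background on why a stale MMF_HAS_UPROBES is only a performance problem and not a correctness one: the bit is the per-mm fast-path gate in the breakpoint notifier, so the worst a leftover bit can do is route one more trap through handle_swbp(). Below is a minimal sketch of that gate; the names uprobe_pre_sstep_notifier() and TIF_UPROBE are recalled from the uprobes code of that period, not part of this patch, and the body is simplified rather than verbatim.

	/* Simplified sketch of the fast-path gate, not the exact kernel source. */
	int uprobe_pre_sstep_notifier(struct pt_regs *regs)
	{
		/* No mm, or no uprobes ever registered in this mm: stay on the fast path. */
		if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
			return 0;

		/* Otherwise take the slow path: handle_swbp() runs on return to userspace. */
		set_thread_flag(TIF_UPROBE);
		return 1;
	}

Once mmf_recalc_uprobes() clears the stale bit, later non-uprobe breakpoints in the same mm bail out at the test_bit() check above and never reach handle_swbp() again.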
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/uprobes.c	22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 9a7f08bab91f..e4a906ce2e1d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1396,6 +1396,25 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
 	return false;
 }
 
+static void mmf_recalc_uprobes(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (!valid_vma(vma, false))
+			continue;
+		/*
+		 * This is not strictly accurate, we can race with
+		 * uprobe_unregister() and see the already removed
+		 * uprobe if delete_uprobe() was not yet called.
+		 */
+		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
+			return;
+	}
+
+	clear_bit(MMF_HAS_UPROBES, &mm->flags);
+}
+
 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 {
 	struct mm_struct *mm = current->mm;
@@ -1417,6 +1436,9 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	} else {
 		*is_swbp = -EFAULT;
 	}
+
+	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
+		mmf_recalc_uprobes(mm);
 	up_read(&mm->mmap_sem);
 
 	return uprobe;