-rw-r--r--	kernel/events/uprobes.c	19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 40ced98b2bc8..5d38b40644b8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -41,6 +41,11 @@
 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
 
 static struct rb_root uprobes_tree = RB_ROOT;
+/*
+ * allows us to skip the uprobe_mmap if there are no uprobe events active
+ * at this time. Probably a fine grained per inode count is better?
+ */
+#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
 
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
 
@@ -74,13 +79,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
 
 static struct percpu_rw_semaphore dup_mmap_sem;
 
-/*
- * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
- * events active at this time. Probably a fine grained per inode count is
- * better?
- */
-static atomic_t uprobe_events = ATOMIC_INIT(0);
-
 /* Have a copy of original instruction */
 #define UPROBE_COPY_INSN	0
 /* Can skip singlestep */
@@ -460,8 +458,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 		kfree(uprobe);
 		uprobe = cur_uprobe;
 		iput(inode);
-	} else {
-		atomic_inc(&uprobe_events);
 	}
 
 	return uprobe;
@@ -685,7 +681,6 @@ static void delete_uprobe(struct uprobe *uprobe)
 	spin_unlock(&uprobes_treelock);
 	iput(uprobe->inode);
 	put_uprobe(uprobe);
-	atomic_dec(&uprobe_events);
 }
 
 struct map_info {
@@ -975,7 +970,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	struct uprobe *uprobe, *u;
 	struct inode *inode;
 
-	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
+	if (no_uprobe_events() || !valid_vma(vma, true))
 		return 0;
 
 	inode = vma->vm_file->f_mapping->host;
@@ -1021,7 +1016,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
 	 */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
+	if (no_uprobe_events() || !valid_vma(vma, false))
 		return;
 
 	if (!atomic_read(&vma->vm_mm->mm_users))	/* called by mmput() ? */
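Note: the patch drops the separate atomic_t uprobe_events counter and derives the "any uprobes registered?" check directly from whether the uprobes rbtree is empty. Below is a minimal userspace sketch of that gating pattern; it uses a stand-in struct rb_root and RB_EMPTY_ROOT() rather than the kernel's <linux/rbtree.h>, and uprobe_mmap_sketch() is a hypothetical stand-in for uprobe_mmap(), so it illustrates the idea rather than the kernel implementation.

#include <stdio.h>

/* Stand-in for the kernel's struct rb_root; only the emptiness test matters here. */
struct rb_root {
	void *rb_node;
};

#define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)

static struct rb_root uprobes_tree = { NULL };

/* Same idea as the patch: "no events" means "the tree of registered uprobes is empty". */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

/* Hypothetical stand-in for uprobe_mmap(): skip all work when nothing is registered. */
static int uprobe_mmap_sketch(void)
{
	if (no_uprobe_events())
		return 0;	/* fast path: no uprobes exist, nothing to install */

	/* ... otherwise walk the tree and install breakpoints ... */
	return 1;
}

int main(void)
{
	printf("skipped: %s\n", uprobe_mmap_sketch() == 0 ? "yes" : "no");
	return 0;
}

Compared with a separate counter, the emptiness test cannot drift out of sync with the tree contents, which is presumably why the patch also removes the atomic_inc()/atomic_dec() pairs in alloc_uprobe() and delete_uprobe().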