author    Oleg Nesterov <oleg@redhat.com>  2012-11-25 13:54:29 -0500
committer Oleg Nesterov <oleg@redhat.com>  2013-02-08 11:47:08 -0500
commit    441f1eb7db8babe2b6b4bc805f023739dbb70e33
tree      4c57b4553ce1b325c677d3e7204a2444fe577a99 /kernel/events
parent    d4d3ccc6d1eb74bd315d49a3829c5ad6c48d21b0
uprobes: Kill uprobe_events, use RB_EMPTY_ROOT() instead
uprobe_events counts the number of uprobes in uprobes_tree, but it is
only ever used as a boolean. We can use RB_EMPTY_ROOT() instead.

The no_uprobe_events() helper added by this patch can probably gain
more callers, say, mmf_recalc_uprobes().
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
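
The idiom generalizes: a counter that is only ever tested against zero
can be replaced by an emptiness check on the structure it shadows. Below
is a minimal user-space sketch of the pattern, using simplified stand-ins
for the kernel's rb_root and RB_EMPTY_ROOT(); the no_events() name and
the two-pointer node are made up for illustration, this is not the
uprobes code itself:

#include <assert.h>
#include <stddef.h>

/* Simplified mirror of the kernel's rbtree root: the root pointer
 * alone already answers "is the tree empty?". */
struct rb_node { struct rb_node *left, *right; };
struct rb_root { struct rb_node *rb_node; };

#define RB_ROOT		(struct rb_root) { NULL, }
/* RB_EMPTY_ROOT() boils down to a NULL check on the root pointer. */
#define no_events(root)	((root)->rb_node == NULL)

int main(void)
{
	struct rb_root tree = RB_ROOT;
	struct rb_node node = { NULL, NULL };

	/* Empty tree: a caller like uprobe_mmap() can bail out early. */
	assert(no_events(&tree));

	tree.rb_node = &node;	/* stand-in for inserting an entry */
	assert(!no_events(&tree));

	tree.rb_node = NULL;	/* stand-in for erasing it */
	assert(no_events(&tree));
	return 0;
}

Unlike the atomic counter, this leaves no per-insert/per-delete
bookkeeping to keep in sync: the tree root is the single source of truth.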
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/uprobes.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 40ced98b2bc8..5d38b40644b8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -41,6 +41,11 @@
 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
 
 static struct rb_root uprobes_tree = RB_ROOT;
+/*
+ * allows us to skip the uprobe_mmap if there are no uprobe events active
+ * at this time. Probably a fine grained per inode count is better?
+ */
+#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
 
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
 
@@ -74,13 +79,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
 
 static struct percpu_rw_semaphore dup_mmap_sem;
 
-/*
- * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
- * events active at this time. Probably a fine grained per inode count is
- * better?
- */
-static atomic_t uprobe_events = ATOMIC_INIT(0);
-
 /* Have a copy of original instruction */
 #define UPROBE_COPY_INSN	0
 /* Can skip singlestep */
@@ -460,8 +458,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 		kfree(uprobe);
 		uprobe = cur_uprobe;
 		iput(inode);
-	} else {
-		atomic_inc(&uprobe_events);
 	}
 
 	return uprobe;
@@ -685,7 +681,6 @@ static void delete_uprobe(struct uprobe *uprobe)
 	spin_unlock(&uprobes_treelock);
 	iput(uprobe->inode);
 	put_uprobe(uprobe);
-	atomic_dec(&uprobe_events);
 }
 
 struct map_info {
@@ -975,7 +970,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	struct uprobe *uprobe, *u;
 	struct inode *inode;
 
-	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
+	if (no_uprobe_events() || !valid_vma(vma, true))
 		return 0;
 
 	inode = vma->vm_file->f_mapping->host;
@@ -1021,7 +1016,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
  */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
+	if (no_uprobe_events() || !valid_vma(vma, false))
 		return;
 
 	if (!atomic_read(&vma->vm_mm->mm_users))	/* called by mmput() ? */
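
As the changelog suggests, no_uprobe_events() could pick up more
callers. A hypothetical sketch, not part of this patch, of how
mmf_recalc_uprobes() might use it; the function body is abbreviated
from the real one, and only the early-out is new:

/* Sketch only: if no uprobes exist at all, no vma can contain one,
 * so the vma walk can be skipped and MMF_HAS_UPROBES cleared at once. */
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (no_uprobe_events())
		goto clear;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;		/* keep MMF_HAS_UPROBES set */
	}
clear:
	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}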