author		Ingo Molnar <mingo@kernel.org>	2012-12-08 09:50:23 -0500
committer	Ingo Molnar <mingo@kernel.org>	2012-12-08 09:51:10 -0500
commit		7e0dd574cd6b1bcc818ed4251e5ceda7d8bee08f (patch)
tree		04f5630e361083ab53da43a46d6c47a52e8b87ca /kernel/events
parent		f0b9abfb044649bc452fb2fb975ff2fd599cc6a3 (diff)
parent		32cdba1e05418909708a17e52505e8b2ba4381d1 (diff)
Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core
Pull uprobes fixes, cleanups and preparation for the ARM port from Oleg Nesterov.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/uprobes.c	43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 5cc4e7e42e68..dea7acfbb071 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -33,6 +33,7 @@
 #include <linux/ptrace.h>	/* user_enable_single_step */
 #include <linux/kdebug.h>	/* notifier mechanism */
 #include "../../mm/internal.h"	/* munlock_vma_page */
+#include <linux/percpu-rwsem.h>
 
 #include <linux/uprobes.h>
 
@@ -71,6 +72,8 @@ static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
 #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
+static struct percpu_rw_semaphore dup_mmap_sem;
+
 /*
  * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
  * events active at this time. Probably a fine grained per inode count is
@@ -766,10 +769,13 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 	struct map_info *info;
 	int err = 0;
 
+	percpu_down_write(&dup_mmap_sem);
 	info = build_map_info(uprobe->inode->i_mapping,
 					uprobe->offset, is_register);
-	if (IS_ERR(info))
-		return PTR_ERR(info);
+	if (IS_ERR(info)) {
+		err = PTR_ERR(info);
+		goto out;
+	}
 
 	while (info) {
 		struct mm_struct *mm = info->mm;
@@ -799,7 +805,8 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
 		mmput(mm);
 		info = free_map_info(info);
 	}
-
+ out:
+	percpu_up_write(&dup_mmap_sem);
 	return err;
 }
 
@@ -1131,6 +1138,16 @@ void uprobe_clear_state(struct mm_struct *mm)
 	kfree(area);
 }
 
+void uprobe_start_dup_mmap(void)
+{
+	percpu_down_read(&dup_mmap_sem);
+}
+
+void uprobe_end_dup_mmap(void)
+{
+	percpu_up_read(&dup_mmap_sem);
+}
+
 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 {
 	newmm->uprobes_state.xol_area = NULL;
@@ -1199,6 +1216,11 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot
 	vaddr = kmap_atomic(area->page);
 	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
 	kunmap_atomic(vaddr);
+	/*
+	 * We probably need flush_icache_user_range() but it needs vma.
+	 * This should work on supported architectures too.
+	 */
+	flush_dcache_page(area->page);
 
 	return current->utask->xol_vaddr;
 }
@@ -1430,16 +1452,6 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	return uprobe;
 }
 
-void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
-{
-	user_enable_single_step(current);
-}
-
-void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
-{
-	user_disable_single_step(current);
-}
-
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
@@ -1493,7 +1505,6 @@ static void handle_swbp(struct pt_regs *regs)
 		goto out;
 
 	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
-		arch_uprobe_enable_step(&uprobe->arch);
 		utask->active_uprobe = uprobe;
 		utask->state = UTASK_SSTEP;
 		return;
@@ -1525,7 +1536,6 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
 	else
 		WARN_ON_ONCE(1);
 
-	arch_uprobe_disable_step(&uprobe->arch);
 	put_uprobe(uprobe);
 	utask->active_uprobe = NULL;
 	utask->state = UTASK_RUNNING;
@@ -1604,6 +1614,9 @@ static int __init init_uprobes(void)
 		mutex_init(&uprobes_mmap_mutex[i]);
 	}
 
+	if (percpu_init_rwsem(&dup_mmap_sem))
+		return -ENOMEM;
+
 	return register_die_notifier(&uprobe_exception_nb);
 }
 module_init(init_uprobes);
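
Note on the locking introduced above: dup_mmap_sem is a percpu_rw_semaphore used to close the race between uprobe (un)registration and fork(). register_for_each_vma() now takes it for write while it installs or removes breakpoints in every mm that maps the probed inode, and the fork path is expected to hold it for read around the address-space copy via the new uprobe_start_dup_mmap()/uprobe_end_dup_mmap() hooks; the fork-side callers live outside kernel/events and are therefore not part of this diffstat. The following is only a minimal userspace analogue of that locking pattern, using a plain pthread_rwlock in place of a percpu_rw_semaphore, with names borrowed from the kernel hooks purely for illustration.

/*
 * Userspace sketch of the dup_mmap_sem scheme -- illustrative only, not
 * kernel code.  "Fork" paths take the lock shared while they duplicate
 * state; the "register" path takes it exclusive so it sees either a fully
 * copied address space or none of it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dup_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void uprobe_start_dup_mmap(void) { pthread_rwlock_rdlock(&dup_mmap_sem); }
static void uprobe_end_dup_mmap(void)   { pthread_rwlock_unlock(&dup_mmap_sem); }

/* fork side: many of these may run concurrently with each other */
static void dup_mmap_path(void)
{
	uprobe_start_dup_mmap();
	/* ... copy the address space; no breakpoints appear or vanish here ... */
	uprobe_end_dup_mmap();
}

/* register/unregister side: excludes all concurrent dup_mmap_path() calls */
static void register_for_each_vma_path(void)
{
	pthread_rwlock_wrlock(&dup_mmap_sem);
	/* ... install or remove the breakpoint in every existing mm ... */
	pthread_rwlock_unlock(&dup_mmap_sem);
}

int main(void)
{
	dup_mmap_path();
	register_for_each_vma_path();
	puts("lock ordering exercised");
	return 0;
}

The kernel uses a percpu_rw_semaphore rather than an ordinary rwsem because fork() is the hot path and should pay almost nothing on the read side, while registration is rare and can afford the expensive write-side synchronization.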