author		Ingo Molnar <mingo@kernel.org>	2013-11-21 03:59:27 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-11-21 03:59:27 -0500
commit		09897d78dbc3a544426f2272b5601c62922ccab9 (patch)
tree		4c10b34aa8cbac758e5fe094a79687a9a763e28c
parent		e98a6e59dff885eb387163b1a7abe019a44ba90b (diff)
parent		ad439356ae5ae7688b39f1107fd5b874850fec18 (diff)
Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core
Pull uprobes cleanups from Oleg Nesterov.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/powerpc/include/asm/uprobes.h	 5
-rw-r--r--	arch/powerpc/kernel/uprobes.c		 2
-rw-r--r--	include/linux/uprobes.h			52
-rw-r--r--	kernel/events/uprobes.c			60
4 files changed, 66 insertions, 53 deletions
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 75c6ecdb8f37..7422a999a39a 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
 
 struct arch_uprobe {
 	union {
-		u8	insn[MAX_UINSN_BYTES];
-		u8	ixol[MAX_UINSN_BYTES];
-		u32	ainsn;
+		u32	insn;
+		u32	ixol;
 	};
 };
 
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 59f419b935f2..003b20964ea0 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	 * emulate_step() returns 1 if the insn was successfully emulated.
 	 * For all other cases, we need to single-step in hardware.
 	 */
-	ret = emulate_step(regs, auprobe->ainsn);
+	ret = emulate_step(regs, auprobe->insn);
 	if (ret > 0)
 		return true;
 
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 319eae70fe84..e32251e00e62 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,16 +26,13 @@
 
 #include <linux/errno.h>
 #include <linux/rbtree.h>
+#include <linux/types.h>
 
 struct vm_area_struct;
 struct mm_struct;
 struct inode;
 struct notifier_block;
 
-#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
-# include <asm/uprobes.h>
-#endif
-
 #define UPROBE_HANDLER_REMOVE		1
 #define UPROBE_HANDLER_MASK		1
 
@@ -60,6 +57,8 @@ struct uprobe_consumer {
 };
 
 #ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
 enum uprobe_task_state {
 	UTASK_RUNNING,
 	UTASK_SSTEP,
@@ -72,35 +71,28 @@
  */
 struct uprobe_task {
 	enum uprobe_task_state		state;
-	struct arch_uprobe_task		autask;
 
-	struct return_instance		*return_instances;
-	unsigned int			depth;
-	struct uprobe			*active_uprobe;
+	union {
+		struct {
+			struct arch_uprobe_task	autask;
+			unsigned long		vaddr;
+		};
 
+		struct {
+			struct callback_head	dup_xol_work;
+			unsigned long		dup_xol_addr;
+		};
+	};
+
+	struct uprobe			*active_uprobe;
 	unsigned long			xol_vaddr;
-	unsigned long			vaddr;
-};
 
-/*
- * On a breakpoint hit, thread contests for a slot.  It frees the
- * slot after singlestep. Currently a fixed number of slots are
- * allocated.
- */
-struct xol_area {
-	wait_queue_head_t	wq;		/* if all slots are busy */
-	atomic_t		slot_count;	/* number of in-use slots */
-	unsigned long		*bitmap;	/* 0 = free slot */
-	struct page		*page;
-
-	/*
-	 * We keep the vma's vm_start rather than a pointer to the vma
-	 * itself. The probed process or a naughty kernel module could make
-	 * the vma go away, and we must handle that reasonably gracefully.
-	 */
-	unsigned long		vaddr;		/* Page(s) of instruction slots */
+	struct return_instance		*return_instances;
+	unsigned int			depth;
 };
 
+struct xol_area;
+
 struct uprobes_state {
 	struct xol_area		*xol_area;
 };
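
Note (not part of the patch): the reworked uprobe_task overlays the single-step state (autask, vaddr) and the fork-time XOL-duplication state (dup_xol_work, dup_xol_addr) in one union. A union is only safe here because the two groups are never live at the same time, which is exactly what the old "TODO: move it into the union in uprobe_task" comment removed further down was asking for. A minimal user-space sketch with stand-in types (the fake_* names are purely hypothetical, not the kernel definitions) showing that the two anonymous structs share storage rather than enlarging the struct:

/*
 * Illustrative only: stand-in types, not the kernel's arch_uprobe_task
 * or callback_head. Shows that the two anonymous structs in the union
 * start at the same offset, so the fork-time fields add no per-task cost.
 * Build with: cc -std=c11 -Wall sketch.c
 */
#include <stdio.h>
#include <stddef.h>

struct fake_arch_uprobe_task { unsigned long saved; };
struct fake_callback_head {
	struct fake_callback_head *next;
	void (*func)(struct fake_callback_head *);
};

struct fake_uprobe_task {
	int state;

	union {
		struct {				/* used while single-stepping */
			struct fake_arch_uprobe_task	autask;
			unsigned long			vaddr;
		};
		struct {				/* used only for fork-time XOL duplication */
			struct fake_callback_head	dup_xol_work;
			unsigned long			dup_xol_addr;
		};
	};

	unsigned long xol_vaddr;
};

int main(void)
{
	/* Both groups begin at the same offset: they overlay, not append. */
	printf("autask offset       = %zu\n", offsetof(struct fake_uprobe_task, autask));
	printf("dup_xol_work offset = %zu\n", offsetof(struct fake_uprobe_task, dup_xol_work));
	printf("sizeof(task struct) = %zu\n", sizeof(struct fake_uprobe_task));
	return 0;
}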
@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
 extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
 extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
 extern void uprobe_free_utask(struct task_struct *t);
 extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
 extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
 extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
 extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
 {
 	return false;
 }
-static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
-{
-	return 0;
-}
 static inline void uprobe_free_utask(struct task_struct *t)
 {
 }
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 24b7d6ca871b..b886a5e7d4ff 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -73,6 +73,17 @@ struct uprobe {
 	struct inode		*inode;		/* Also hold a ref to inode */
 	loff_t			offset;
 	unsigned long		flags;
+
+	/*
+	 * The generic code assumes that it has two members of unknown type
+	 * owned by the arch-specific code:
+	 *
+	 *	insn -	copy_insn() saves the original instruction here for
+	 *		arch_uprobe_analyze_insn().
+	 *
+	 *	ixol -	potentially modified instruction to execute out of
+	 *		line, copied to xol_area by xol_get_insn_slot().
+	 */
 	struct arch_uprobe	arch;
 };
 
@@ -86,6 +97,29 @@ struct return_instance {
 };
 
 /*
+ * Execute out of line area: anonymous executable mapping installed
+ * by the probed task to execute the copy of the original instruction
+ * mangled by set_swbp().
+ *
+ * On a breakpoint hit, thread contests for a slot.  It frees the
+ * slot after singlestep. Currently a fixed number of slots are
+ * allocated.
+ */
+struct xol_area {
+	wait_queue_head_t	wq;		/* if all slots are busy */
+	atomic_t		slot_count;	/* number of in-use slots */
+	unsigned long		*bitmap;	/* 0 = free slot */
+	struct page		*page;
+
+	/*
+	 * We keep the vma's vm_start rather than a pointer to the vma
+	 * itself. The probed process or a naughty kernel module could make
+	 * the vma go away, and we must handle that reasonably gracefully.
+	 */
+	unsigned long		vaddr;		/* Page(s) of instruction slots */
+};
+
+/*
  * valid_vma: Verify if the specified vma is an executable vma
  * Relax restrictions while unregistering: vm_flags might have
  * changed after breakpoint was inserted.
@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
 int __weak
 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
 {
 	struct address_space *mapping = uprobe->inode->i_mapping;
 	loff_t offs = uprobe->offset;
-	void *insn = uprobe->arch.insn;
-	int size = MAX_UINSN_BYTES;
+	void *insn = &uprobe->arch.insn;
+	int size = sizeof(uprobe->arch.insn);
 	int len, err = -EIO;
 
 	/* Copy only available bytes, -EIO if nothing was read */
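
Note (not part of the patch): the change from uprobe->arch.insn to &uprobe->arch.insn and from MAX_UINSN_BYTES to sizeof(uprobe->arch.insn) is what lets an architecture declare insn/ixol either as a byte array or as a plain scalar (as powerpc now does with u32): taking the member's address and its sizeof behave the same for both shapes, whereas a bare array name relies on array-to-pointer decay and a hard-coded byte count bakes in one particular layout. The same reasoning covers the is_trap_insn() and copy_to_page() call sites below. A small stand-alone sketch with stand-in types (arch_a/arch_b, hypothetical names) illustrating this:

/*
 * Illustrative only: arch_a mimics the old u8[] layout, arch_b the new
 * powerpc u32 layout. &member and sizeof(member) work uniformly for both;
 * a bare "member" plus a fixed size only makes sense for the array case.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FAKE_MAX_UINSN_BYTES 8

struct arch_a { uint8_t  insn[FAKE_MAX_UINSN_BYTES]; };	/* array member  */
struct arch_b { uint32_t insn; };			/* scalar member */

/* Stands in for the copy_from_page()-based copy in copy_insn(). */
static void copy_insn_generic(void *dst, size_t size, const void *src)
{
	memcpy(dst, src, size);
}

int main(void)
{
	const uint8_t image[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
	struct arch_a a;
	struct arch_b b;

	/* Generic code only needs an address and a size for either shape. */
	copy_insn_generic(&a.insn, sizeof(a.insn), image);	/* copies 8 bytes */
	copy_insn_generic(&b.insn, sizeof(b.insn), image);	/* copies 4 bytes */

	printf("sizeof(a.insn) = %zu, sizeof(b.insn) = %zu\n",
	       sizeof(a.insn), sizeof(b.insn));
	printf("b.insn = 0x%08x\n", (unsigned)b.insn);	/* value depends on host endianness */
	return 0;
}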
@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 		goto out;
 
 	ret = -ENOTSUPP;
-	if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn))
+	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
 		goto out;
 
 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 
 	/* Initialize the slot */
 	copy_to_page(area->page, xol_vaddr,
-			uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
+			&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 	/*
 	 * We probably need flush_icache_user_range() but it needs vma.
 	 * This should work on supported architectures too.
@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg)
 
 static void dup_xol_work(struct callback_head *work)
 {
-	kfree(work);
-
 	if (current->flags & PF_EXITING)
 		return;
 
-	if (!__create_xol_area(current->utask->vaddr))
+	if (!__create_xol_area(current->utask->dup_xol_addr))
 		uprobe_warn(current, "dup xol area");
 }
 
@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
 {
 	struct uprobe_task *utask = current->utask;
 	struct mm_struct *mm = current->mm;
-	struct callback_head *work;
 	struct xol_area *area;
 
 	t->utask = NULL;
@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
 	if (mm == t->mm)
 		return;
 
-	/* TODO: move it into the union in uprobe_task */
-	work = kmalloc(sizeof(*work), GFP_KERNEL);
-	if (!work)
-		return uprobe_warn(t, "dup xol area");
-
-	t->utask->vaddr = area->vaddr;
-	init_task_work(work, dup_xol_work);
-	task_work_add(t, work, true);
+	t->utask->dup_xol_addr = area->vaddr;
+	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
+	task_work_add(t, &t->utask->dup_xol_work, true);
 }
 
 /*
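
Note (not part of the patch): with dup_xol_work now embedded in uprobe_task, uprobe_copy_process() no longer needs to kmalloc() a callback_head, dup_xol_work() no longer needs to kfree() it, and the "dup xol area" warning for a failed allocation goes away. A hedged sketch of the general embedded-callback_head task_work pattern the new code follows, using hypothetical names (my_ctx, my_work_fn); the bool notify argument to task_work_add() matches this kernel version, as seen in the hunk above:

/*
 * Sketch only, hypothetical names: embed the callback_head in the owning
 * structure instead of allocating it, and recover the owner with
 * container_of() in the callback.
 */
#include <linux/sched.h>
#include <linux/task_work.h>
#include <linux/kernel.h>

struct my_ctx {
	struct callback_head	work;	/* embedded, no kmalloc/kfree needed */
	unsigned long		addr;
};

static void my_work_fn(struct callback_head *head)
{
	/* Recover the enclosing context from the embedded member. */
	struct my_ctx *ctx = container_of(head, struct my_ctx, work);

	if (current->flags & PF_EXITING)
		return;

	pr_debug("deferred work for addr %lx\n", ctx->addr);
}

static void queue_for_task(struct task_struct *t, struct my_ctx *ctx,
			   unsigned long addr)
{
	ctx->addr = addr;
	init_task_work(&ctx->work, my_work_fn);
	/*
	 * true == notify: the work runs before the task returns to user
	 * mode (the bool form matches this kernel era; later kernels take
	 * an enum task_work_notify_mode instead).
	 */
	task_work_add(t, &ctx->work, true);
}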