about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Oleg Nesterov <oleg@redhat.com>	2013-11-09 13:49:39 -0500
committer	Oleg Nesterov <oleg@redhat.com>	2013-11-20 10:31:01 -0500
commitc912dae60ae6f659455f239298110adc67a5f3e9 (patch)
tree1997566bb556e768f12881dd39d1d1036ef9e5b5
parent3d78e945b6249d4ef2308192343f8b203b1d7ea5 (diff)
uprobes: Cleanup !CONFIG_UPROBES decls, unexport xol_area
1. Don't include asm/uprobes.h unconditionally, we only need it if CONFIG_UPROBES.

2. Move the definition of "struct xol_area" into uprobes.c. Perhaps we should simply kill struct uprobes_state, it buys nothing.

3. Kill the dummy definition of uprobe_get_swbp_addr(), nobody except handle_swbp() needs it.

4. Purely cosmetic, but move the decl of uprobe_get_swbp_addr() up, close to other __weak helpers.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
-rw-r--r--include/linux/uprobes.h31
-rw-r--r--kernel/events/uprobes.c19
2 files changed, 23 insertions, 27 deletions
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 2225542624de..e32251e00e62 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -33,10 +33,6 @@ struct mm_struct;
33struct inode; 33struct inode;
34struct notifier_block; 34struct notifier_block;
35 35
36#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
37# include <asm/uprobes.h>
38#endif
39
40#define UPROBE_HANDLER_REMOVE 1 36#define UPROBE_HANDLER_REMOVE 1
41#define UPROBE_HANDLER_MASK 1 37#define UPROBE_HANDLER_MASK 1
42 38
@@ -61,6 +57,8 @@ struct uprobe_consumer {
61}; 57};
62 58
63#ifdef CONFIG_UPROBES 59#ifdef CONFIG_UPROBES
60#include <asm/uprobes.h>
61
64enum uprobe_task_state { 62enum uprobe_task_state {
65 UTASK_RUNNING, 63 UTASK_RUNNING,
66 UTASK_SSTEP, 64 UTASK_SSTEP,
@@ -93,24 +91,7 @@ struct uprobe_task {
93 unsigned int depth; 91 unsigned int depth;
94}; 92};
95 93
96/* 94struct xol_area;
97 * On a breakpoint hit, thread contests for a slot. It frees the
98 * slot after singlestep. Currently a fixed number of slots are
99 * allocated.
100 */
101struct xol_area {
102 wait_queue_head_t wq; /* if all slots are busy */
103 atomic_t slot_count; /* number of in-use slots */
104 unsigned long *bitmap; /* 0 = free slot */
105 struct page *page;
106
107 /*
108 * We keep the vma's vm_start rather than a pointer to the vma
109 * itself. The probed process or a naughty kernel module could make
110 * the vma go away, and we must handle that reasonably gracefully.
111 */
112 unsigned long vaddr; /* Page(s) of instruction slots */
113};
114 95
115struct uprobes_state { 96struct uprobes_state {
116 struct xol_area *xol_area; 97 struct xol_area *xol_area;
@@ -120,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
120extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 101extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
121extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); 102extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
122extern bool __weak is_trap_insn(uprobe_opcode_t *insn); 103extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
104extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
123extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); 105extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
124extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 106extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
125extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); 107extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
@@ -131,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
131extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); 113extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
132extern void uprobe_free_utask(struct task_struct *t); 114extern void uprobe_free_utask(struct task_struct *t);
133extern void uprobe_copy_process(struct task_struct *t, unsigned long flags); 115extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
134extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
135extern int uprobe_post_sstep_notifier(struct pt_regs *regs); 116extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
136extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); 117extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
137extern void uprobe_notify_resume(struct pt_regs *regs); 118extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -187,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
187{ 168{
188 return false; 169 return false;
189} 170}
190static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
191{
192 return 0;
193}
194static inline void uprobe_free_utask(struct task_struct *t) 171static inline void uprobe_free_utask(struct task_struct *t)
195{ 172{
196} 173}
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 445962a72498..51a7f535ff96 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -86,6 +86,25 @@ struct return_instance {
86}; 86};
87 87
88/* 88/*
89 * On a breakpoint hit, thread contests for a slot. It frees the
90 * slot after singlestep. Currently a fixed number of slots are
91 * allocated.
92 */
93struct xol_area {
94 wait_queue_head_t wq; /* if all slots are busy */
95 atomic_t slot_count; /* number of in-use slots */
96 unsigned long *bitmap; /* 0 = free slot */
97 struct page *page;
98
99 /*
100 * We keep the vma's vm_start rather than a pointer to the vma
101 * itself. The probed process or a naughty kernel module could make
102 * the vma go away, and we must handle that reasonably gracefully.
103 */
104 unsigned long vaddr; /* Page(s) of instruction slots */
105};
106
107/*
89 * valid_vma: Verify if the specified vma is an executable vma 108 * valid_vma: Verify if the specified vma is an executable vma
90 * Relax restrictions while unregistering: vm_flags might have 109 * Relax restrictions while unregistering: vm_flags might have
91 * changed after breakpoint was inserted. 110 * changed after breakpoint was inserted.