author     Frederic Weisbecker <fweisbec@gmail.com>   2009-09-09 13:22:48 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>   2009-11-08 09:34:42 -0500
commit     24f1e32c60c45c89a997c73395b69c8af6f0a84e (patch)
tree       4f30f16e18cb4abbcf96b3b331e6a3f01bfa26e6 /kernel/hw_breakpoint.c
parent     2da3e160cb3d226d87b907fab26850d838ed8d7c (diff)
hw-breakpoints: Rewrite the hw-breakpoints layer on top of perf events

This patch rebases the implementation of the breakpoints API on top of
perf event instances.

Each breakpoint is now a perf event that handles the
register scheduling, thread/cpu attachment, etc.
The new layering is now made as follows:

       ptrace       kgdb      ftrace   perf syscall
          \          |          /         /
           \         |         /         /
                                        /
            Core breakpoint API        /
                                      /
                     |               /
                     |              /

              Breakpoints perf events

                     |
                     |

               Breakpoints PMU ---- Debug Register constraints handling
                                    (Part of core breakpoint API)

                     |
                     |

             Hardware debug registers
Reasons for this rewrite:

- Use the centralized/optimized pmu register scheduling,
  implying an easier arch integration

- More powerful register handling: perf attributes (pinned/flexible
  events, exclusive/non-exclusive, tunable period, etc...); a sketch
  of the resulting attribute setup follows this list.
Impact:

- New perf ABI: the hardware breakpoint counters (a user-space sketch
  follows this list)

- Ptrace breakpoint setting remains tricky and still needs some
  per-thread breakpoint references.
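The new ABI also means a breakpoint counter can, in principle, be opened
from user space like any other event once the tools grow support (still a
todo below). A hedged sketch, assuming the bp_* attr fields and the generic
len/type constants are visible to user space:

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>
	#include <linux/hw_breakpoint.h>

	/* Sketch: open a 4-byte write watchpoint on 'addr' for this thread. */
	static int open_bp_counter(unsigned long addr)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type    = PERF_TYPE_BREAKPOINT;
		attr.size    = sizeof(attr);
		attr.bp_addr = addr;
		attr.bp_len  = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_W;

		/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}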
Todo (in order):

- Support breakpoint perf counter events in the perf tools (i.e. implement
  perf_bpcounter_event())

- Support from perf tools
Changes in v2:

- Follow the perf "event" rename

- The ptrace regression has been fixed (ptrace breakpoint perf events
  weren't released when a task ended)

- Drop the struct hw_breakpoint and store the generic fields in
  perf_event_attr.

- Separate core and arch-specific headers, drop
  asm-generic/hw_breakpoint.h and create linux/hw_breakpoint.h

- Use the new generic len/type for breakpoints

- Handle the off case: when the breakpoints API is not supported by an arch
Changes in v3:

- Fix broken CONFIG_KVM: we need to propagate the breakpoint API
  changes to KVM when we exit the guest and restore the bp registers
  to the host.
Changes in v4:

- Drop the hw_breakpoint_restore() stub as it is only used by KVM

- EXPORT_SYMBOL_GPL hw_breakpoint_restore() as KVM can be built as a
  module

- Restore the breakpoints unconditionally on kvm guest exit:
  TIF_DEBUG_THREAD no longer covers every case of running
  breakpoints, and vcpu->arch.switch_db_regs might not always be
  set when the guest used debug registers.
  (Waiting for a reliable optimization)
Changes in v5:

- Split up the move of asm-generic/hw_breakpoint.h to
  linux/hw_breakpoint.h into a separate patch

- Optimize the breakpoint restore when switching from a kvm guest
  to the host: we only want to restore the state if the host has
  active breakpoints, otherwise we don't care about messed-up
  address registers.

- Add asm/hw_breakpoint.h to Kbuild

- Fix bad breakpoint type in trace_selftest.c
Changes in v6:

- Fix wrong header inclusion in trace.h (triggered a build
  error with CONFIG_FTRACE_SELFTEST)
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jan Kiszka <jan.kiszka@web.de>
Cc: Jiri Slaby <jirislaby@gmail.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'kernel/hw_breakpoint.c')
-rw-r--r--	kernel/hw_breakpoint.c	424
1 file changed, 170 insertions(+), 254 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index c1f64e65a9f3..08f6d0163201 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
  *
  * Copyright (C) 2007 Alan Stern
  * Copyright (C) IBM Corporation, 2009
+ * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
  */
 
 /*
@@ -35,334 +36,242 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 
-#include <asm/hw_breakpoint.h>
+#include <linux/hw_breakpoint.h>
+
 #include <asm/processor.h>
 
 #ifdef CONFIG_X86
 #include <asm/debugreg.h>
 #endif
-/*
- * Spinlock that protects all (un)register operations over kernel/user-space
- * breakpoint requests
- */
-static DEFINE_SPINLOCK(hw_breakpoint_lock);
-
-/* Array of kernel-space breakpoint structures */
-struct hw_breakpoint *hbp_kernel[HBP_NUM];
-
-/*
- * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being
- * modified but we need the older copy to handle any hbp exceptions. It will
- * sync with hbp_kernel[] value after updation is done through IPIs.
- */
-DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]);
-
-/*
- * Kernel breakpoints grow downwards, starting from HBP_NUM
- * 'hbp_kernel_pos' denotes lowest numbered breakpoint register occupied for
- * kernel-space request. We will initialise it here and not in an __init
- * routine because load_debug_registers(), which uses this variable can be
- * called very early during CPU initialisation.
- */
-unsigned int hbp_kernel_pos = HBP_NUM;
 
-/*
- * An array containing refcount of threads using a given bkpt register
- * Accesses are synchronised by acquiring hw_breakpoint_lock
- */
-unsigned int hbp_user_refcount[HBP_NUM];
+static atomic_t bp_slot;
 
-/*
- * Load the debug registers during startup of a CPU.
- */
-void load_debug_registers(void)
+int reserve_bp_slot(struct perf_event *bp)
 {
-	unsigned long flags;
-	struct task_struct *tsk = current;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	/* Prevent IPIs for new kernel breakpoint updates */
-	local_irq_save(flags);
-	arch_update_kernel_hw_breakpoint(NULL);
-	local_irq_restore(flags);
-
-	if (test_tsk_thread_flag(tsk, TIF_DEBUG))
-		arch_install_thread_hw_breakpoint(tsk);
-
-	spin_unlock_bh(&hw_breakpoint_lock);
-}
+	if (atomic_inc_return(&bp_slot) == HBP_NUM) {
+		atomic_dec(&bp_slot);
 
-/*
- * Erase all the hardware breakpoint info associated with a thread.
- *
- * If tsk != current then tsk must not be usable (for example, a
- * child being cleaned up from a failed fork).
- */
-void flush_thread_hw_breakpoint(struct task_struct *tsk)
-{
-	int i;
-	struct thread_struct *thread = &(tsk->thread);
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	/* The thread no longer has any breakpoints associated with it */
-	clear_tsk_thread_flag(tsk, TIF_DEBUG);
-	for (i = 0; i < HBP_NUM; i++) {
-		if (thread->hbp[i]) {
-			hbp_user_refcount[i]--;
-			kfree(thread->hbp[i]);
-			thread->hbp[i] = NULL;
-		}
+		return -ENOSPC;
 	}
 
-	arch_flush_thread_hw_breakpoint(tsk);
-
-	/* Actually uninstall the breakpoints if necessary */
-	if (tsk == current)
-		arch_uninstall_thread_hw_breakpoint();
-	spin_unlock_bh(&hw_breakpoint_lock);
+	return 0;
 }
 
-/*
- * Copy the hardware breakpoint info from a thread to its cloned child.
- */
-int copy_thread_hw_breakpoint(struct task_struct *tsk,
-		struct task_struct *child, unsigned long clone_flags)
+void release_bp_slot(struct perf_event *bp)
 {
-	/*
-	 * We will assume that breakpoint settings are not inherited
-	 * and the child starts out with no debug registers set.
-	 * But what about CLONE_PTRACE?
-	 */
-	clear_tsk_thread_flag(child, TIF_DEBUG);
-
-	/* We will call flush routine since the debugregs are not inherited */
-	arch_flush_thread_hw_breakpoint(child);
-
-	return 0;
+	atomic_dec(&bp_slot);
 }
 
-static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk,
-					struct hw_breakpoint *bp)
+int __register_perf_hw_breakpoint(struct perf_event *bp)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int rc;
+	int ret;
 
-	/* Do not overcommit. Fail if kernel has used the hbp registers */
-	if (pos >= hbp_kernel_pos)
-		return -ENOSPC;
+	ret = reserve_bp_slot(bp);
+	if (ret)
+		return ret;
 
-	rc = arch_validate_hwbkpt_settings(bp, tsk);
-	if (rc)
-		return rc;
+	if (!bp->attr.disabled)
+		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
-	thread->hbp[pos] = bp;
-	hbp_user_refcount[pos]++;
+	return ret;
+}
 
-	arch_update_user_hw_breakpoint(pos, tsk);
-	/*
-	 * Does it need to be installed right now?
-	 * Otherwise it will get installed the next time tsk runs
-	 */
-	if (tsk == current)
-		arch_install_thread_hw_breakpoint(tsk);
+int register_perf_hw_breakpoint(struct perf_event *bp)
+{
+	bp->callback = perf_bp_event;
 
-	return rc;
+	return __register_perf_hw_breakpoint(bp);
 }
 
 /*
- * Modify the address of a hbp register already in use by the task
- * Do not invoke this in-lieu of a __unregister_user_hw_breakpoint()
+ * Register a breakpoint bound to a task and a given cpu.
+ * If cpu is -1, the breakpoint is active for the task in every cpu
+ * If the task is -1, the breakpoint is active for every tasks in the given
+ * cpu.
  */
-static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk,
-					struct hw_breakpoint *bp)
+static struct perf_event *
+register_user_hw_breakpoint_cpu(unsigned long addr,
+				int len,
+				int type,
+				perf_callback_t triggered,
+				pid_t pid,
+				int cpu,
+				bool active)
 {
-	struct thread_struct *thread = &(tsk->thread);
-
-	if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk)))
-		return -EINVAL;
-
-	if (thread->hbp[pos] == NULL)
-		return -EINVAL;
-
-	thread->hbp[pos] = bp;
+	struct perf_event_attr *attr;
+	struct perf_event *bp;
+
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	if (!attr)
+		return ERR_PTR(-ENOMEM);
+
+	attr->type = PERF_TYPE_BREAKPOINT;
+	attr->size = sizeof(*attr);
+	attr->bp_addr = addr;
+	attr->bp_len = len;
+	attr->bp_type = type;
 	/*
-	 * 'pos' must be that of a hbp register already used by 'tsk'
-	 * Otherwise arch_modify_user_hw_breakpoint() will fail
+	 * Such breakpoints are used by debuggers to trigger signals when
+	 * we hit the excepted memory op. We can't miss such events, they
+	 * must be pinned.
 	 */
-	arch_update_user_hw_breakpoint(pos, tsk);
+	attr->pinned = 1;
 
-	if (tsk == current)
-		arch_install_thread_hw_breakpoint(tsk);
+	if (!active)
+		attr->disabled = 1;
 
-	return 0;
-}
-
-static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk)
-{
-	hbp_user_refcount[pos]--;
-	tsk->thread.hbp[pos] = NULL;
+	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
+	kfree(attr);
 
-	arch_update_user_hw_breakpoint(pos, tsk);
-
-	if (tsk == current)
-		arch_install_thread_hw_breakpoint(tsk);
+	return bp;
 }
 
 /**
  * register_user_hw_breakpoint - register a hardware breakpoint for user space
+ * @addr: is the memory address that triggers the breakpoint
+ * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
+ * @type: the type of the access to the memory (read/write/exec)
+ * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @bp: the breakpoint structure to register
- *
- * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
- * @bp->triggered must be set properly before invocation
+ * @active: should we activate it while registering it
  *
  */
-int register_user_hw_breakpoint(struct task_struct *tsk,
-					struct hw_breakpoint *bp)
+struct perf_event *
+register_user_hw_breakpoint(unsigned long addr,
+			    int len,
+			    int type,
+			    perf_callback_t triggered,
+			    struct task_struct *tsk,
+			    bool active)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int i, rc = -ENOSPC;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	for (i = 0; i < hbp_kernel_pos; i++) {
-		if (!thread->hbp[i]) {
-			rc = __register_user_hw_breakpoint(i, tsk, bp);
-			break;
-		}
-	}
-	if (!rc)
-		set_tsk_thread_flag(tsk, TIF_DEBUG);
-
-	spin_unlock_bh(&hw_breakpoint_lock);
-	return rc;
+	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
+					       tsk->pid, -1, active);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
 /**
  * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
+ * @bp: the breakpoint structure to modify
+ * @addr: is the memory address that triggers the breakpoint
+ * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
+ * @type: the type of the access to the memory (read/write/exec)
+ * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @bp: the breakpoint structure to unregister
- *
+ * @active: should we activate it while registering it
  */
-int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp)
+struct perf_event *
+modify_user_hw_breakpoint(struct perf_event *bp,
+			  unsigned long addr,
+			  int len,
+			  int type,
+			  perf_callback_t triggered,
+			  struct task_struct *tsk,
+			  bool active)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int i, ret = -ENOENT;
+	/*
+	 * FIXME: do it without unregistering
+	 * - We don't want to lose our slot
+	 * - If the new bp is incorrect, don't lose the older one
+	 */
+	unregister_hw_breakpoint(bp);
 
-	spin_lock_bh(&hw_breakpoint_lock);
-	for (i = 0; i < hbp_kernel_pos; i++) {
-		if (bp == thread->hbp[i]) {
-			ret = __modify_user_hw_breakpoint(i, tsk, bp);
-			break;
-		}
-	}
-	spin_unlock_bh(&hw_breakpoint_lock);
-	return ret;
+	return register_user_hw_breakpoint(addr, len, type, triggered,
+					   tsk, active);
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
 /**
- * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
+ * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
  * @bp: the breakpoint structure to unregister
- *
  */
-void unregister_user_hw_breakpoint(struct task_struct *tsk,
-						struct hw_breakpoint *bp)
+void unregister_hw_breakpoint(struct perf_event *bp)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int i, pos = -1, hbp_counter = 0;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-	for (i = 0; i < hbp_kernel_pos; i++) {
-		if (thread->hbp[i])
-			hbp_counter++;
-		if (bp == thread->hbp[i])
-			pos = i;
-	}
-	if (pos >= 0) {
-		__unregister_user_hw_breakpoint(pos, tsk);
-		hbp_counter--;
-	}
-	if (!hbp_counter)
-		clear_tsk_thread_flag(tsk, TIF_DEBUG);
-
-	spin_unlock_bh(&hw_breakpoint_lock);
+	if (!bp)
+		return;
+	perf_event_release_kernel(bp);
+}
+EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
+
+static struct perf_event *
+register_kernel_hw_breakpoint_cpu(unsigned long addr,
+				  int len,
+				  int type,
+				  perf_callback_t triggered,
+				  int cpu,
+				  bool active)
+{
+	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
+					       -1, cpu, active);
 }
-EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint);
 
 /**
- * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space
- * @bp: the breakpoint structure to register
- *
- * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
- * @bp->triggered must be set properly before invocation
+ * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
+ * @addr: is the memory address that triggers the breakpoint
+ * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
+ * @type: the type of the access to the memory (read/write/exec)
+ * @triggered: callback to trigger when we hit the breakpoint
+ * @active: should we activate it while registering it
  *
+ * @return a set of per_cpu pointers to perf events
  */
-int register_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+struct perf_event **
+register_wide_hw_breakpoint(unsigned long addr,
+			    int len,
+			    int type,
+			    perf_callback_t triggered,
+			    bool active)
 {
-	int rc;
+	struct perf_event **cpu_events, **pevent, *bp;
+	long err;
+	int cpu;
+
+	cpu_events = alloc_percpu(typeof(*cpu_events));
+	if (!cpu_events)
+		return ERR_PTR(-ENOMEM);
 
-	rc = arch_validate_hwbkpt_settings(bp, NULL);
-	if (rc)
-		return rc;
+	for_each_possible_cpu(cpu) {
+		pevent = per_cpu_ptr(cpu_events, cpu);
+		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
+					triggered, cpu, active);
 
-	spin_lock_bh(&hw_breakpoint_lock);
+		*pevent = bp;
 
-	rc = -ENOSPC;
-	/* Check if we are over-committing */
-	if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) {
-		hbp_kernel_pos--;
-		hbp_kernel[hbp_kernel_pos] = bp;
-		on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
-		rc = 0;
+		if (IS_ERR(bp) || !bp) {
+			err = PTR_ERR(bp);
+			goto fail;
+		}
 	}
 
-	spin_unlock_bh(&hw_breakpoint_lock);
-	return rc;
+	return cpu_events;
+
+fail:
+	for_each_possible_cpu(cpu) {
+		pevent = per_cpu_ptr(cpu_events, cpu);
+		if (IS_ERR(*pevent) || !*pevent)
+			break;
+		unregister_hw_breakpoint(*pevent);
+	}
+	free_percpu(cpu_events);
+	/* return the error if any */
+	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint);
 
 /**
- * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space
- * @bp: the breakpoint structure to unregister
- *
- * Uninstalls and unregisters @bp.
+ * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
+ * @cpu_events: the per cpu set of events to unregister
  */
-void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
 {
-	int i, j;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	/* Find the 'bp' in our list of breakpoints for kernel */
-	for (i = hbp_kernel_pos; i < HBP_NUM; i++)
-		if (bp == hbp_kernel[i])
-			break;
+	int cpu;
+	struct perf_event **pevent;
 
-	/* Check if we did not find a match for 'bp'. If so return early */
-	if (i == HBP_NUM) {
-		spin_unlock_bh(&hw_breakpoint_lock);
-		return;
+	for_each_possible_cpu(cpu) {
+		pevent = per_cpu_ptr(cpu_events, cpu);
+		unregister_hw_breakpoint(*pevent);
 	}
-
-	/*
-	 * We'll shift the breakpoints one-level above to compact if
-	 * unregistration creates a hole
-	 */
-	for (j = i; j > hbp_kernel_pos; j--)
-		hbp_kernel[j] = hbp_kernel[j-1];
-
-	hbp_kernel[hbp_kernel_pos] = NULL;
-	on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
-	hbp_kernel_pos++;
-
-	spin_unlock_bh(&hw_breakpoint_lock);
+	free_percpu(cpu_events);
 }
-EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint);
+
 
 static struct notifier_block hw_breakpoint_exceptions_nb = {
 	.notifier_call = hw_breakpoint_exceptions_notify,
@@ -374,5 +283,12 @@ static int __init init_hw_breakpoint(void)
 {
 	return register_die_notifier(&hw_breakpoint_exceptions_nb);
 }
-
 core_initcall(init_hw_breakpoint);
+
+
+struct pmu perf_ops_bp = {
+	.enable		= arch_install_hw_breakpoint,
+	.disable	= arch_uninstall_hw_breakpoint,
+	.read		= hw_breakpoint_pmu_read,
+	.unthrottle	= hw_breakpoint_pmu_unthrottle
+};