author		K.Prasad <prasad@linux.vnet.ibm.com>	2009-06-01 14:13:33 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-06-02 16:46:58 -0400
commit		62a038d34db26771756cf3689e36de638bedd2c4 (patch)
tree		4b435b34474a889d8a5c82b687e981d9e09abc91
parent		b332828c39326b1dca617f387dd15d12e81cd5f0 (diff)
hw-breakpoints: introducing generic hardware breakpoint handler interfaces
This patch introduces the generic Hardware Breakpoint interfaces for both
user-space and kernel-space requests. The core API handles hardware
breakpoints through new helpers, sitting in front of the arch implementation
for both user-space and kernel breakpoints.

One can set kernel-wide breakpoints using the following helpers, passing them
a generic struct hw_breakpoint:

- register_kernel_hw_breakpoint()
- unregister_kernel_hw_breakpoint()
- modify_kernel_hw_breakpoint()

On the other side, per-task breakpoints use:

- register_user_hw_breakpoint()
- unregister_user_hw_breakpoint()
- modify_user_hw_breakpoint()

[ fweisbec@gmail.com: fix conflict against perfcounter ]

Original-patch-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
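For illustration only (not part of this commit): a minimal sketch of a module
that sets a kernel-wide write breakpoint through the new helpers. The
info.name/info.type/info.len fields, the triggered callback signature, and the
HW_BREAKPOINT_WRITE/HW_BREAKPOINT_LEN_4 constants are assumed from the
arch-specific (x86) side of this patch series; check <asm/hw_breakpoint.h> for
the actual layout on a given architecture.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/hw_breakpoint.h>

/* Illustrative sketch: field names and HW_BREAKPOINT_* constants are
 * assumed from the arch side of this patch series, not from this commit. */
static struct hw_breakpoint sample_hbp;

/* Runs in exception context whenever the watched symbol is written. */
static void sample_hbp_handler(struct hw_breakpoint *bp, struct pt_regs *regs)
{
	printk(KERN_INFO "%s was written to\n", sample_hbp.info.name);
	dump_stack();
}

static int __init hw_break_sample_init(void)
{
	sample_hbp.info.name = "pid_max";		/* symbol resolved via kallsyms */
	sample_hbp.info.type = HW_BREAKPOINT_WRITE;	/* break on write access */
	sample_hbp.info.len = HW_BREAKPOINT_LEN_4;	/* watch 4 bytes */
	sample_hbp.triggered = sample_hbp_handler;

	/* Consumes one debug register on every CPU; fails with -ENOSPC if
	 * user space already holds the remaining registers. */
	return register_kernel_hw_breakpoint(&sample_hbp);
}

static void __exit hw_break_sample_exit(void)
{
	unregister_kernel_hw_breakpoint(&sample_hbp);
}

module_init(hw_break_sample_init);
module_exit(hw_break_sample_exit);
MODULE_LICENSE("GPL");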
-rw-r--r--	arch/Kconfig		4
-rw-r--r--	kernel/Makefile		1
-rw-r--r--	kernel/hw_breakpoint.c	378
3 files changed, 383 insertions, 0 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 78a35e9dc104..1adf2d0e6356 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -112,3 +112,7 @@ config HAVE_DMA_API_DEBUG
 
 config HAVE_DEFAULT_NO_SPIN_MUTEXES
 	bool
+
+config HAVE_HW_BREAKPOINT
+	bool
+
diff --git a/kernel/Makefile b/kernel/Makefile
index a35eee3436de..18ad1110b226 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..c1f64e65a9f3
--- /dev/null
+++ b/kernel/hw_breakpoint.c
@@ -0,0 +1,378 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>

#ifdef CONFIG_X86
#include <asm/debugreg.h>
#endif
/*
 * Spinlock that protects all (un)register operations over kernel/user-space
 * breakpoint requests
 */
static DEFINE_SPINLOCK(hw_breakpoint_lock);

/* Array of kernel-space breakpoint structures */
struct hw_breakpoint *hbp_kernel[HBP_NUM];

/*
 * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being
 * modified but we need the older copy to handle any hbp exceptions. It will
 * sync with the hbp_kernel[] value after the update is done through IPIs.
 */
DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]);

/*
 * Kernel breakpoints grow downwards, starting from HBP_NUM.
 * 'hbp_kernel_pos' denotes the lowest numbered breakpoint register occupied
 * for a kernel-space request. We initialise it here and not in an __init
 * routine because load_debug_registers(), which uses this variable, can be
 * called very early during CPU initialisation.
 */
unsigned int hbp_kernel_pos = HBP_NUM;

/*
 * An array containing the refcount of threads using a given bkpt register.
 * Accesses are synchronised by acquiring hw_breakpoint_lock.
 */
unsigned int hbp_user_refcount[HBP_NUM];

/*
 * Load the debug registers during startup of a CPU.
 */
void load_debug_registers(void)
{
	unsigned long flags;
	struct task_struct *tsk = current;

	spin_lock_bh(&hw_breakpoint_lock);

	/* Prevent IPIs for new kernel breakpoint updates */
	local_irq_save(flags);
	arch_update_kernel_hw_breakpoint(NULL);
	local_irq_restore(flags);

	if (test_tsk_thread_flag(tsk, TIF_DEBUG))
		arch_install_thread_hw_breakpoint(tsk);

	spin_unlock_bh(&hw_breakpoint_lock);
}

/*
 * Erase all the hardware breakpoint info associated with a thread.
 *
 * If tsk != current then tsk must not be usable (for example, a
 * child being cleaned up from a failed fork).
 */
void flush_thread_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *thread = &(tsk->thread);

	spin_lock_bh(&hw_breakpoint_lock);

	/* The thread no longer has any breakpoints associated with it */
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->hbp[i]) {
			hbp_user_refcount[i]--;
			kfree(thread->hbp[i]);
			thread->hbp[i] = NULL;
		}
	}

	arch_flush_thread_hw_breakpoint(tsk);

	/* Actually uninstall the breakpoints if necessary */
	if (tsk == current)
		arch_uninstall_thread_hw_breakpoint();
	spin_unlock_bh(&hw_breakpoint_lock);
}

/*
 * Copy the hardware breakpoint info from a thread to its cloned child.
 */
int copy_thread_hw_breakpoint(struct task_struct *tsk,
		struct task_struct *child, unsigned long clone_flags)
{
	/*
	 * We will assume that breakpoint settings are not inherited
	 * and the child starts out with no debug registers set.
	 * But what about CLONE_PTRACE?
	 */
	clear_tsk_thread_flag(child, TIF_DEBUG);

	/* We will call flush routine since the debugregs are not inherited */
	arch_flush_thread_hw_breakpoint(child);

	return 0;
}

static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk,
					struct hw_breakpoint *bp)
{
	struct thread_struct *thread = &(tsk->thread);
	int rc;

	/* Do not overcommit. Fail if kernel has used the hbp registers */
	if (pos >= hbp_kernel_pos)
		return -ENOSPC;

	rc = arch_validate_hwbkpt_settings(bp, tsk);
	if (rc)
		return rc;

	thread->hbp[pos] = bp;
	hbp_user_refcount[pos]++;

	arch_update_user_hw_breakpoint(pos, tsk);
	/*
	 * Does it need to be installed right now?
	 * Otherwise it will get installed the next time tsk runs
	 */
	if (tsk == current)
		arch_install_thread_hw_breakpoint(tsk);

	return rc;
}

/*
 * Modify the address of a hbp register already in use by the task
 * Do not invoke this in lieu of a __unregister_user_hw_breakpoint()
 */
static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk,
					struct hw_breakpoint *bp)
{
	struct thread_struct *thread = &(tsk->thread);

	if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk)))
		return -EINVAL;

	if (thread->hbp[pos] == NULL)
		return -EINVAL;

	thread->hbp[pos] = bp;
	/*
	 * 'pos' must be that of a hbp register already used by 'tsk'
	 * Otherwise arch_modify_user_hw_breakpoint() will fail
	 */
	arch_update_user_hw_breakpoint(pos, tsk);

	if (tsk == current)
		arch_install_thread_hw_breakpoint(tsk);

	return 0;
}

static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk)
{
	hbp_user_refcount[pos]--;
	tsk->thread.hbp[pos] = NULL;

	arch_update_user_hw_breakpoint(pos, tsk);

	if (tsk == current)
		arch_install_thread_hw_breakpoint(tsk);
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @bp: the breakpoint structure to register
 *
 * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
 * @bp->triggered must be set properly before invocation
 *
 */
int register_user_hw_breakpoint(struct task_struct *tsk,
					struct hw_breakpoint *bp)
{
	struct thread_struct *thread = &(tsk->thread);
	int i, rc = -ENOSPC;

	spin_lock_bh(&hw_breakpoint_lock);

	for (i = 0; i < hbp_kernel_pos; i++) {
		if (!thread->hbp[i]) {
			rc = __register_user_hw_breakpoint(i, tsk, bp);
			break;
		}
	}
	if (!rc)
		set_tsk_thread_flag(tsk, TIF_DEBUG);

	spin_unlock_bh(&hw_breakpoint_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @bp: the breakpoint structure to modify
 *
 */
int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp)
{
	struct thread_struct *thread = &(tsk->thread);
	int i, ret = -ENOENT;

	spin_lock_bh(&hw_breakpoint_lock);
	for (i = 0; i < hbp_kernel_pos; i++) {
		if (bp == thread->hbp[i]) {
			ret = __modify_user_hw_breakpoint(i, tsk, bp);
			break;
		}
	}
	spin_unlock_bh(&hw_breakpoint_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @bp: the breakpoint structure to unregister
 *
 */
void unregister_user_hw_breakpoint(struct task_struct *tsk,
						struct hw_breakpoint *bp)
{
	struct thread_struct *thread = &(tsk->thread);
	int i, pos = -1, hbp_counter = 0;

	spin_lock_bh(&hw_breakpoint_lock);
	for (i = 0; i < hbp_kernel_pos; i++) {
		if (thread->hbp[i])
			hbp_counter++;
		if (bp == thread->hbp[i])
			pos = i;
	}
	if (pos >= 0) {
		__unregister_user_hw_breakpoint(pos, tsk);
		hbp_counter--;
	}
	if (!hbp_counter)
		clear_tsk_thread_flag(tsk, TIF_DEBUG);

	spin_unlock_bh(&hw_breakpoint_lock);
}
EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint);

/**
 * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space
 * @bp: the breakpoint structure to register
 *
 * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
 * @bp->triggered must be set properly before invocation
 *
 */
int register_kernel_hw_breakpoint(struct hw_breakpoint *bp)
{
	int rc;

	rc = arch_validate_hwbkpt_settings(bp, NULL);
	if (rc)
		return rc;

	spin_lock_bh(&hw_breakpoint_lock);

	rc = -ENOSPC;
	/* Check if we are over-committing */
	if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) {
		hbp_kernel_pos--;
		hbp_kernel[hbp_kernel_pos] = bp;
		on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
		rc = 0;
	}

	spin_unlock_bh(&hw_breakpoint_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint);

/**
 * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space
 * @bp: the breakpoint structure to unregister
 *
 * Uninstalls and unregisters @bp.
 */
void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp)
{
	int i, j;

	spin_lock_bh(&hw_breakpoint_lock);

	/* Find the 'bp' in our list of breakpoints for kernel */
	for (i = hbp_kernel_pos; i < HBP_NUM; i++)
		if (bp == hbp_kernel[i])
			break;

	/* Check if we did not find a match for 'bp'. If so return early */
	if (i == HBP_NUM) {
		spin_unlock_bh(&hw_breakpoint_lock);
		return;
	}

	/*
	 * We'll shift the breakpoints one level above to compact if
	 * unregistration creates a hole
	 */
	for (j = i; j > hbp_kernel_pos; j--)
		hbp_kernel[j] = hbp_kernel[j-1];

	hbp_kernel[hbp_kernel_pos] = NULL;
	on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
	hbp_kernel_pos++;

	spin_unlock_bh(&hw_breakpoint_lock);
}
EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}

core_initcall(init_hw_breakpoint);
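
A closing note, again illustrative and not part of the patch: the exported
per-task helpers follow the same pattern, taking the target task_struct in
addition to the breakpoint. A minimal sketch under the same assumed field
layout (info.address, info.type, info.len and the triggered callback are
taken from the arch side of the series):

/* Illustrative sketch: attach a 4-byte write watchpoint to an address in a
 * traced task; field names and HW_BREAKPOINT_* constants are assumed. */
static struct hw_breakpoint task_bp;

static void task_bp_triggered(struct hw_breakpoint *bp, struct pt_regs *regs)
{
	/* Called when the watched user-space address is written. */
}

int watch_task_write(struct task_struct *tsk, unsigned long addr)
{
	task_bp.info.address = addr;			/* assumed arch field */
	task_bp.info.type = HW_BREAKPOINT_WRITE;
	task_bp.info.len = HW_BREAKPOINT_LEN_4;
	task_bp.triggered = task_bp_triggered;

	/* Picks the first free debug register below hbp_kernel_pos */
	return register_user_hw_breakpoint(tsk, &task_bp);
}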