 arch/x86/kernel/kgdb.c        | 51
 include/linux/hw_breakpoint.h |  2
 kernel/hw_breakpoint.c        | 52
 3 files changed, 95 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 62bea7307eaa..bfba6019d762 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -239,6 +239,49 @@ static void kgdb_correct_hw_break(void)
 	hw_breakpoint_restore();
 }
 
+static int hw_break_reserve_slot(int breakno)
+{
+	int cpu;
+	int cnt = 0;
+	struct perf_event **pevent;
+
+	for_each_online_cpu(cpu) {
+		cnt++;
+		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+		if (dbg_reserve_bp_slot(*pevent))
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	for_each_online_cpu(cpu) {
+		cnt--;
+		if (!cnt)
+			break;
+		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+		dbg_release_bp_slot(*pevent);
+	}
+	return -1;
+}
+
+static int hw_break_release_slot(int breakno)
+{
+	struct perf_event **pevent;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+		if (dbg_release_bp_slot(*pevent))
+			/*
+			 * The debugger is responsible for handling the
+			 * retry on remove failure.
+			 */
+			return -1;
+	}
+	return 0;
+}
+
 static int
 kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
 {
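The reserve path above walks every online CPU and, on a partial failure, unwinds exactly the reservations that succeeded: cnt counts the attempts, and the fail loop stops one release short of that count, skipping the CPU whose reservation failed. A minimal userspace sketch of the same all-or-roll-back pattern, assuming a fixed CPU count and array-backed slots (NCPUS, try_reserve() and the other names are invented for illustration, not kernel APIs):

#include <stdio.h>

#define NCPUS   4
#define HBP_NUM 4

static int slots_used[NCPUS];          /* stand-in for per-CPU perf slots */

static int try_reserve(int cpu)
{
        if (slots_used[cpu] >= HBP_NUM)
                return -1;             /* no free slot on this "CPU" */
        slots_used[cpu]++;
        return 0;
}

static void release_slot(int cpu)
{
        slots_used[cpu]--;
}

static int reserve_on_all_cpus(void)
{
        int cpu, cnt = 0;

        for (cpu = 0; cpu < NCPUS; cpu++) {
                cnt++;
                if (try_reserve(cpu))
                        goto fail;
        }
        return 0;

fail:
        /*
         * Release only the reservations that succeeded: cnt counts
         * attempts including the failing one, so stop one short.
         */
        for (cpu = 0; cpu < NCPUS; cpu++) {
                cnt--;
                if (!cnt)
                        break;
                release_slot(cpu);
        }
        return -1;
}

int main(void)
{
        slots_used[2] = HBP_NUM;       /* force a failure on the third "CPU" */
        printf("reserve: %d\n", reserve_on_all_cpus());
        printf("cpu0 after rollback: %d\n", slots_used[0]);
        return 0;
}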
@@ -250,6 +293,10 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
 	if (i == 4)
 		return -1;
 
+	if (hw_break_release_slot(i)) {
+		printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
+		return -1;
+	}
 	breakinfo[i].enabled = 0;
 
 	return 0;
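The comment in hw_break_release_slot() makes the debugger core responsible for retrying when a release fails, i.e. when nr_bp_mutex happened to be held at debugger entry. A hypothetical sketch of that obligation, modeled in plain C with invented names (try_release_slot() stands in for the dbg_release_bp_slot() call chain; this is not code from the patch):

#include <stdio.h>

static int busy = 1;            /* pretend the mutex is held on the first try */

static int try_release_slot(void)
{
        if (busy) {
                busy = 0;       /* the lock holder finishes meanwhile */
                return -1;      /* what the dbg_ release path would return */
        }
        return 0;
}

static int remove_with_retry(int attempts)
{
        while (attempts--) {
                if (!try_release_slot())
                        return 0;   /* slot released, safe to clear 'enabled' */
                /* busy: keep the breakpoint marked, retry on next entry */
        }
        return -1;
}

int main(void)
{
        printf("%d\n", remove_with_retry(2));   /* second attempt succeeds */
        return 0;
}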
@@ -316,6 +363,10 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
 		return -1;
 	}
 	breakinfo[i].addr = addr;
+	if (hw_break_reserve_slot(i)) {
+		breakinfo[i].addr = 0;
+		return -1;
+	}
 	breakinfo[i].enabled = 1;
 
 	return 0;
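Note the ordering in both kgdb.c hunks: a slot is reserved before breakinfo[i].enabled is set, and released before enabled is cleared, so an enabled breakpoint always owns its perf slot and kgdb_correct_hw_break() never programs a debug register that perf has not accounted for. A toy userspace model of that invariant (struct bp and the helpers are illustrative, not kernel code):

#include <assert.h>

struct bp {
        unsigned long addr;
        int enabled;
        int reserved;
};

static int reserve(struct bp *b)
{
        b->reserved = 1;
        return 0;
}

static int release(struct bp *b)
{
        b->reserved = 0;
        return 0;
}

static int set_bp(struct bp *b, unsigned long addr)
{
        b->addr = addr;
        if (reserve(b)) {
                b->addr = 0;    /* undo the partial setup on failure */
                return -1;
        }
        b->enabled = 1;         /* enable only once the slot is held */
        return 0;
}

static int remove_bp(struct bp *b)
{
        if (release(b))
                return -1;      /* stays enabled; debugger retries later */
        b->enabled = 0;
        return 0;
}

int main(void)
{
        struct bp b = {0};

        set_bp(&b, 0xc0ffee);
        assert(!b.enabled || b.reserved);   /* enabled implies reserved */
        remove_bp(&b);
        assert(!b.enabled && !b.reserved);
        return 0;
}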
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 41235c93e4e9..070ba0621738 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -75,6 +75,8 @@ extern int __register_perf_hw_breakpoint(struct perf_event *bp);
 extern void unregister_hw_breakpoint(struct perf_event *bp);
 extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events);
 
+extern int dbg_reserve_bp_slot(struct perf_event *bp);
+extern int dbg_release_bp_slot(struct perf_event *bp);
 extern int reserve_bp_slot(struct perf_event *bp);
 extern void release_bp_slot(struct perf_event *bp);
 
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index c030ae657f20..8a5c7d55ac9f 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
-	int ret = 0;
-
-	mutex_lock(&nr_bp_mutex);
 
 	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-		ret = -ENOSPC;
-		goto end;
-	}
+	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+		return -ENOSPC;
 
 	toggle_bp_slot(bp, true);
 
-end:
+	return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	mutex_lock(&nr_bp_mutex);
+
+	ret = __reserve_bp_slot(bp);
+
 	mutex_unlock(&nr_bp_mutex);
 
 	return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+	toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
-	toggle_bp_slot(bp, false);
+	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release functions.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	__release_bp_slot(bp);
+
+	return 0;
+}
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
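The dbg_ variants never take nr_bp_mutex: kgdb runs with every other CPU stopped, so if mutex_is_locked() reports the mutex free at that instant, nothing can acquire it until the machine resumes, and calling __reserve_bp_slot()/__release_bp_slot() without the lock is safe. Ordinary concurrent code has no such guarantee, so the nearest userspace equivalent is a trylock that fails fast instead of sleeping, as in this pthreads sketch (all names illustrative, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nr_bp_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_slots_used;

static int __reserve_slot(void)
{
        if (nr_slots_used >= 4)
                return -1;      /* the kernel version returns -ENOSPC */
        nr_slots_used++;
        return 0;
}

/* Normal path: may block on the mutex, like reserve_bp_slot(). */
int reserve_slot(void)
{
        int ret;

        pthread_mutex_lock(&nr_bp_mutex);
        ret = __reserve_slot();
        pthread_mutex_unlock(&nr_bp_mutex);
        return ret;
}

/* Debugger-style path: never blocks; -1 means "try again later". */
int dbg_reserve_slot(void)
{
        int ret;

        if (pthread_mutex_trylock(&nr_bp_mutex))
                return -1;      /* lock busy: give up immediately */
        ret = __reserve_slot();
        pthread_mutex_unlock(&nr_bp_mutex);
        return ret;
}

int main(void)
{
        printf("normal: %d, dbg: %d\n", reserve_slot(), dbg_reserve_slot());
        return 0;
}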