Diffstat (limited to 'kernel/hw_breakpoint.c')
 -rw-r--r--  kernel/hw_breakpoint.c | 68
 1 file changed, 54 insertions(+), 14 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index dbcbf6a33a08..967e66143e11 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,6 +40,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
@@ -242,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  *   + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
-	int ret = 0;
-
-	mutex_lock(&nr_bp_mutex);
 
 	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-		ret = -ENOSPC;
-		goto end;
-	}
+	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+		return -ENOSPC;
 
 	toggle_bp_slot(bp, true);
 
-end:
+	return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	mutex_lock(&nr_bp_mutex);
+
+	ret = __reserve_bp_slot(bp);
+
 	mutex_unlock(&nr_bp_mutex);
 
 	return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+	toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
-	toggle_bp_slot(bp, false);
+	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock using the dbg_* variant of for the reserve and
+ * release breakpoint slots.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	__release_bp_slot(bp);
+
+	return 0;
+}
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
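This hunk splits the slot accounting out of the locked paths: reserve_bp_slot() and release_bp_slot() keep taking nr_bp_mutex, while the new __reserve_bp_slot()/__release_bp_slot() helpers do the bookkeeping without locking, so dbg_reserve_bp_slot() and dbg_release_bp_slot() can serve the kernel debugger, which runs with all CPUs stopped and must not sleep. Below is an illustrative sketch only, not part of the patch: the function name debugger_arm_bp() and its error handling are assumptions; only the dbg_* helpers come from the hunk above.

static int debugger_arm_bp(struct perf_event *bp)
{
	/*
	 * All CPUs are stopped in the debugger, so sleeping on
	 * nr_bp_mutex is impossible; back off if it happens to be held.
	 */
	if (dbg_reserve_bp_slot(bp))
		return -1;	/* slot accounting busy, caller retries */

	/* ... program the breakpoint into the debug registers here ... */

	return 0;
}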
@@ -295,6 +328,10 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
 	if (!bp->attr.disabled || !bp->overflow_handler)
 		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
+	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
+	if (ret)
+		release_bp_slot(bp);
+
 	return ret;
 }
 
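This hunk closes a slot leak: register_perf_hw_breakpoint() reserves a slot before validating the breakpoint settings, and a failed arch_validate_hwbkpt_settings() previously returned without giving that slot back. A minimal sketch of the resulting reserve -> validate -> release-on-failure pattern follows; everything except reserve_bp_slot(), release_bp_slot() and arch_validate_hwbkpt_settings() is illustrative.

static int example_register_bp(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
	if (ret)
		release_bp_slot(bp);	/* don't leak the reserved slot */

	return ret;
}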
@@ -323,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
 	u64 old_addr = bp->attr.bp_addr;
+	u64 old_len = bp->attr.bp_len;
 	int old_type = bp->attr.bp_type;
-	int old_len = bp->attr.bp_len;
 	int err = 0;
 
 	perf_event_disable(bp);
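old_len now matches the u64 type of perf_event_attr.bp_len instead of being truncated into an int. The saved old_addr/old_len/old_type triple is what modify_user_hw_breakpoint() falls back to when the caller's new settings do not validate; the sketch below shows that rollback pattern and is an assumption for illustration, not the verbatim kernel code.

static int apply_or_rollback(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     u64 old_addr, u64 old_len, int old_type)
{
	int err;

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len  = attr->bp_len;

	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
	if (err) {
		/* restore the known-good configuration */
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len  = old_len;
	}

	return err;
}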
@@ -388,7 +425,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 	if (!cpu_events)
 		return ERR_PTR(-ENOMEM);
 
-	for_each_possible_cpu(cpu) {
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
 		pevent = per_cpu_ptr(cpu_events, cpu);
 		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
 
@@ -399,18 +437,20 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 			goto fail;
 		}
 	}
+	put_online_cpus();
 
 	return cpu_events;
 
 fail:
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		pevent = per_cpu_ptr(cpu_events, cpu);
 		if (IS_ERR(*pevent))
 			break;
 		unregister_hw_breakpoint(*pevent);
 	}
+	put_online_cpus();
+
 	free_percpu(cpu_events);
-	/* return the error if any */
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
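With these two hunks register_wide_hw_breakpoint() creates counters only on online CPUs and holds get_online_cpus()/put_online_cpus() across both the registration loop and the failure cleanup, so a CPU cannot be hotplugged in or out halfway through. The usage sketch below is loosely modeled on the in-tree hw_breakpoint sample module; the watched symbol ("jiffies"), the handler body, the module boilerplate and the overflow-handler prototype (the four-argument form used by kernels of this generation) are assumptions for illustration, not part of this patch.

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event **wide_bp;

static void wide_bp_handler(struct perf_event *bp, int nmi,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	printk(KERN_INFO "hw_breakpoint: watched data was written to\n");
}

static int __init wide_bp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("jiffies");	/* placeholder target */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	/* one breakpoint per online CPU, under CPU-hotplug protection */
	wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler);
	if (IS_ERR(wide_bp))
		return PTR_ERR(wide_bp);

	return 0;
}

static void __exit wide_bp_exit(void)
{
	unregister_wide_hw_breakpoint(wide_bp);
}

module_init(wide_bp_init);
module_exit(wide_bp_exit);
MODULE_LICENSE("GPL");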