Diffstat (limited to 'kernel/hw_breakpoint.c')

 kernel/hw_breakpoint.c | 156 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 95 insertions(+), 61 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index cf5ee1628411..50dbd5999588 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,6 +40,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
@@ -52,7 +53,7 @@
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +74,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
 static unsigned int max_task_bp_pinned(int cpu)
 {
         int i;
-        unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 
         for (i = HBP_NUM -1; i >= 0; i--) {
                 if (tsk_pinned[i] > 0)
@@ -83,15 +84,51 @@ static unsigned int max_task_bp_pinned(int cpu)
         return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+        struct perf_event_context *ctx = tsk->perf_event_ctxp;
+        struct list_head *list;
+        struct perf_event *bp;
+        unsigned long flags;
+        int count = 0;
+
+        if (WARN_ONCE(!ctx, "No perf context for this task"))
+                return 0;
+
+        list = &ctx->event_list;
+
+        raw_spin_lock_irqsave(&ctx->lock, flags);
+
+        /*
+         * The current breakpoint counter is not included in the list
+         * at the open() callback time
+         */
+        list_for_each_entry(bp, list, event_entry) {
+                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+                        count++;
+        }
+
+        raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+        return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+        int cpu = bp->cpu;
+        struct task_struct *tsk = bp->ctx->task;
+
         if (cpu >= 0) {
                 slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-                slots->pinned += max_task_bp_pinned(cpu);
+                if (!tsk)
+                        slots->pinned += max_task_bp_pinned(cpu);
+                else
+                        slots->pinned += task_bp_pinned(tsk);
                 slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
                 return;
@@ -101,7 +138,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
                 unsigned int nr;
 
                 nr = per_cpu(nr_cpu_bp_pinned, cpu);
-                nr += max_task_bp_pinned(cpu);
+                if (!tsk)
+                        nr += max_task_bp_pinned(cpu);
+                else
+                        nr += task_bp_pinned(tsk);
 
                 if (nr > slots->pinned)
                         slots->pinned = nr;
@@ -118,35 +158,12 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-        int count = 0;
-        struct perf_event *bp;
-        struct perf_event_context *ctx = tsk->perf_event_ctxp;
         unsigned int *tsk_pinned;
-        struct list_head *list;
-        unsigned long flags;
-
-        if (WARN_ONCE(!ctx, "No perf context for this task"))
-                return;
-
-        list = &ctx->event_list;
-
-        spin_lock_irqsave(&ctx->lock, flags);
-
-        /*
-         * The current breakpoint counter is not included in the list
-         * at the open() callback time
-         */
-        list_for_each_entry(bp, list, event_entry) {
-                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-                        count++;
-        }
-
-        spin_unlock_irqrestore(&ctx->lock, flags);
+        int count = 0;
 
-        if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-                return;
+        count = task_bp_pinned(tsk);
 
-        tsk_pinned = per_cpu(task_bp_pinned, cpu);
+        tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
         if (enable) {
                 tsk_pinned[count]++;
                 if (count > 0)
@@ -193,7 +210,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to a single cpu, check:
  *
  *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *           + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
  *
  *       -> If there are already non-pinned counters in this cpu, it means
  *          there is already a free slot for them.
@@ -204,7 +221,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to every cpus, check:
  *
  *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
  *
  *       -> This is roughly the same, except we check the number of per cpu
  *          bp for every cpu and we keep the max one. Same for the per tasks
@@ -216,7 +233,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to a single cpu, check:
  *
  *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *           + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ *           + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
  *
  *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
  *          one register at least (or they will never be fed).
@@ -224,7 +241,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to every cpus, check:
  *
  *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *           + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ *           + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
 int reserve_bp_slot(struct perf_event *bp)
 {
@@ -233,7 +250,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
         mutex_lock(&nr_bp_mutex);
 
-        fetch_bp_busy_slots(&slots, bp->cpu);
+        fetch_bp_busy_slots(&slots, bp);
 
         /* Flexible counters need to keep at least one slot */
         if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
@@ -259,7 +276,7 @@ void release_bp_slot(struct perf_event *bp)
 }
 
 
-int __register_perf_hw_breakpoint(struct perf_event *bp)
+int register_perf_hw_breakpoint(struct perf_event *bp)
 {
         int ret;
 
@@ -276,19 +293,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp)
          * This is a quick hack that will be removed soon, once we remove
          * the tmp breakpoints from ptrace
          */
-        if (!bp->attr.disabled || bp->callback == perf_bp_event)
+        if (!bp->attr.disabled || !bp->overflow_handler)
                 ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
         return ret;
 }
 
-int register_perf_hw_breakpoint(struct perf_event *bp)
-{
-        bp->callback = perf_bp_event;
-
-        return __register_perf_hw_breakpoint(bp);
-}
-
 /**
  * register_user_hw_breakpoint - register a hardware breakpoint for user space
  * @attr: breakpoint attributes
@@ -297,7 +307,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
  */
 struct perf_event *
 register_user_hw_breakpoint(struct perf_event_attr *attr,
-                            perf_callback_t triggered,
+                            perf_overflow_handler_t triggered,
                             struct task_struct *tsk)
 {
         return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
@@ -311,19 +321,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
  * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
  */
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
-                          perf_callback_t triggered,
-                          struct task_struct *tsk)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-        /*
-         * FIXME: do it without unregistering
-         * - We don't want to lose our slot
-         * - If the new bp is incorrect, don't lose the older one
-         */
-        unregister_hw_breakpoint(bp);
+        u64 old_addr = bp->attr.bp_addr;
+        int old_type = bp->attr.bp_type;
+        int old_len = bp->attr.bp_len;
+        int err = 0;
 
-        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+        perf_event_disable(bp);
+
+        bp->attr.bp_addr = attr->bp_addr;
+        bp->attr.bp_type = attr->bp_type;
+        bp->attr.bp_len = attr->bp_len;
+
+        if (attr->disabled)
+                goto end;
+
+        err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+        if (!err)
+                perf_event_enable(bp);
+
+        if (err) {
+                bp->attr.bp_addr = old_addr;
+                bp->attr.bp_type = old_type;
+                bp->attr.bp_len = old_len;
+                if (!bp->attr.disabled)
+                        perf_event_enable(bp);
+
+                return err;
+        }
+
+end:
+        bp->attr.disabled = attr->disabled;
+
+        return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
@@ -348,7 +379,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
  */
 struct perf_event **
 register_wide_hw_breakpoint(struct perf_event_attr *attr,
-                            perf_callback_t triggered)
+                            perf_overflow_handler_t triggered)
 {
         struct perf_event **cpu_events, **pevent, *bp;
         long err;
@@ -358,7 +389,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
         if (!cpu_events)
                 return ERR_PTR(-ENOMEM);
 
-        for_each_possible_cpu(cpu) {
+        get_online_cpus();
+        for_each_online_cpu(cpu) {
                 pevent = per_cpu_ptr(cpu_events, cpu);
                 bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
 
@@ -369,18 +401,20 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
                         goto fail;
                 }
         }
+        put_online_cpus();
 
         return cpu_events;
 
 fail:
-        for_each_possible_cpu(cpu) {
+        for_each_online_cpu(cpu) {
                 pevent = per_cpu_ptr(cpu_events, cpu);
                 if (IS_ERR(*pevent))
                         break;
                 unregister_hw_breakpoint(*pevent);
         }
+        put_online_cpus();
+
         free_percpu(cpu_events);
-        /* return the error if any */
         return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
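
For context, the sketch below shows how a kernel-side caller might use the reworked API after this diff: the breakpoint is created with a perf_overflow_handler_t callback and later updated in place through modify_user_hw_breakpoint(), instead of being unregistered and re-created. This is a sketch, not code from this commit; the overflow-handler signature (with the nmi argument) and the hw_breakpoint_init()/HW_BREAKPOINT_* attribute helpers are assumed from the hw_breakpoint headers of this era and may differ in other trees.

/*
 * Sketch only: assumes the 2.6.33-era hw_breakpoint/perf interfaces
 * (perf_overflow_handler_t taking an nmi argument, hw_breakpoint_init(),
 * HW_BREAKPOINT_W, HW_BREAKPOINT_LEN_4). Adjust to the tree you build against.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static void sample_hbp_handler(struct perf_event *bp, int nmi,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
{
        /* Runs when the watched address is written. */
        pr_info("breakpoint hit at 0x%llx\n",
                (unsigned long long)bp->attr.bp_addr);
}

static struct perf_event *watch_task_addr(struct task_struct *tsk,
                                          unsigned long addr)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);              /* PERF_TYPE_BREAKPOINT, pinned */
        attr.bp_addr = addr;
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        /* Slot accounting (reserve_bp_slot) happens behind this call. */
        return register_user_hw_breakpoint(&attr, sample_hbp_handler, tsk);
}

static int move_watchpoint(struct perf_event *bp, unsigned long new_addr)
{
        struct perf_event_attr attr = bp->attr;

        attr.bp_addr = new_addr;

        /*
         * With this diff, the breakpoint keeps its slot: it is only
         * disabled/re-enabled around the update, and on validation failure
         * the old addr/type/len are restored and an error is returned.
         */
        return modify_user_hw_breakpoint(bp, &attr);
}

The key difference from the pre-diff API is that the caller keeps both its reserved slot and its perf_event pointer across a modification, rather than tearing the breakpoint down and registering a new one.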
