Diffstat (limited to 'kernel/hw_breakpoint.c')

 kernel/hw_breakpoint.c | 146 ++++++++++++++++++++++++++++---------------
 1 file changed, 88 insertions(+), 58 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index cf5ee1628411..366eedf949c0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
 static unsigned int max_task_bp_pinned(int cpu)
 {
 	int i;
-	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 
 	for (i = HBP_NUM -1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+	struct perf_event_context *ctx = tsk->perf_event_ctxp;
+	struct list_head *list;
+	struct perf_event *bp;
+	unsigned long flags;
+	int count = 0;
+
+	if (WARN_ONCE(!ctx, "No perf context for this task"))
+		return 0;
+
+	list = &ctx->event_list;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	/*
+	 * The current breakpoint counter is not included in the list
+	 * at the open() callback time
+	 */
+	list_for_each_entry(bp, list, event_entry) {
+		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+			count++;
+	}
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+	int cpu = bp->cpu;
+	struct task_struct *tsk = bp->ctx->task;
+
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-		slots->pinned += max_task_bp_pinned(cpu);
+		if (!tsk)
+			slots->pinned += max_task_bp_pinned(cpu);
+		else
+			slots->pinned += task_bp_pinned(tsk);
 		slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
 		return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned, cpu);
-		nr += max_task_bp_pinned(cpu);
+		if (!tsk)
+			nr += max_task_bp_pinned(cpu);
+		else
+			nr += task_bp_pinned(tsk);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
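
The two branches above are the heart of this change: a cpu-bound breakpoint still has to assume the worst-case task on that cpu, but a task-bound breakpoint now competes only with its own task's breakpoints. A stand-alone model of the arithmetic (plain user-space C, not kernel code; HBP_NUM and the counter names are mirrored from the diff, the occupancy values in main() are invented for illustration):

/* Stand-alone model of the new pinned-slot accounting -- not kernel code. */
#include <stdio.h>

#define HBP_NUM 4				/* x86: four debug registers */

static unsigned int nr_cpu_bp_pinned;		/* cpu-wide pinned bps on this cpu */
static unsigned int nr_task_bp_pinned[HBP_NUM];	/* [n]: tasks owning n+1 bps */

/* Worst case over all tasks: highest non-empty bucket, plus one. */
static unsigned int max_task_bp_pinned(void)
{
	int i;

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (nr_task_bp_pinned[i] > 0)
			return i + 1;
	}
	return 0;
}

int main(void)
{
	unsigned int pinned;

	nr_cpu_bp_pinned = 1;		/* one cpu-bound breakpoint */
	nr_task_bp_pinned[2] = 1;	/* some task owns three breakpoints */

	/* cpu-bound bp (tsk == NULL): must assume the worst task */
	pinned = nr_cpu_bp_pinned + max_task_bp_pinned();
	printf("cpu-bound view:  %u/%d pinned\n", pinned, HBP_NUM);

	/* task-bound bp: only its own task's count matters; a task that
	 * owns one breakpoint sees 1 + 1 = 2 instead of 1 + 3 = 4 */
	pinned = nr_cpu_bp_pinned + 1;	/* 1 = task_bp_pinned(tsk) */
	printf("task-bound view: %u/%d pinned\n", pinned, HBP_NUM);

	return 0;
}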
@@ -118,35 +157,12 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-	int count = 0;
-	struct perf_event *bp;
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	unsigned int *tsk_pinned;
-	struct list_head *list;
-	unsigned long flags;
-
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return;
-
-	list = &ctx->event_list;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
-	}
+	int count = 0;
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	count = task_bp_pinned(tsk);
 
-	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-		return;
-
-	tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 	if (enable) {
 		tsk_pinned[count]++;
 		if (count > 0)
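
The hunk only shows the start of the enable branch; in the kernel source of this period the disable branch mirrors it. The bucket semantics implied by the diff: tsk_pinned[n] counts how many tasks own n+1 pinned breakpoints, which is what makes max_task_bp_pinned()'s top-down scan work. A minimal user-space model (illustration only):

/* Model of the tsk_pinned[] bucket shifting -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define HBP_NUM 4

static unsigned int tsk_pinned[HBP_NUM];	/* [n]: tasks owning n+1 bps */

/* count = breakpoints the task already owns, i.e. task_bp_pinned() */
static void toggle_slot(int count, bool enable)
{
	if (enable) {
		tsk_pinned[count]++;		/* task now owns count+1 bps */
		if (count > 0)
			tsk_pinned[count - 1]--;	/* left its old bucket */
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count - 1]++;
	}
}

int main(void)
{
	toggle_slot(0, true);	/* task gets its 1st bp: tsk_pinned[0] == 1 */
	toggle_slot(1, true);	/* and its 2nd: tsk_pinned[1] == 1, [0] == 0 */
	printf("%u %u\n", tsk_pinned[0], tsk_pinned[1]);
	return 0;
}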
@@ -193,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to a single cpu, check:
  *
  *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
  *
  *       -> If there are already non-pinned counters in this cpu, it means
  *          there is already a free slot for them.
@@ -204,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to every cpus, check:
  *
  *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
  *
  *       -> This is roughly the same, except we check the number of per cpu
  *          bp for every cpu and we keep the max one. Same for the per tasks
@@ -216,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to a single cpu, check:
  *
  *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
  *
  *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
  *          one register at least (or they will never be fed).
@@ -224,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to every cpus, check:
  *
  *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
 int reserve_bp_slot(struct perf_event *bp)
 {
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
 	mutex_lock(&nr_bp_mutex);
 
-	fetch_bp_busy_slots(&slots, bp->cpu);
+	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
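
A quick worked example of the check above, with HBP_NUM == 4 as on x86: slots.pinned == 3 plus one flexible counter gives 3 + !!1 == 4, so the reservation is refused and the flexible counters keep the one register they need to ever be scheduled. A tiny sketch of the predicate (plain C, occupancy values invented):

#include <stdio.h>

#define HBP_NUM 4	/* x86 has four debug registers */

int main(void)
{
	unsigned int pinned = 3, flexible = 1;	/* example occupancy */

	/* Flexible counters need to keep at least one slot */
	if (pinned + !!flexible == HBP_NUM)
		printf("refused: would starve the flexible counters\n");
	else
		printf("slot granted\n");
	return 0;
}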
@@ -259,7 +275,7 @@ void release_bp_slot(struct perf_event *bp)
 }
 
 
-int __register_perf_hw_breakpoint(struct perf_event *bp)
+int register_perf_hw_breakpoint(struct perf_event *bp)
 {
 	int ret;
 
@@ -276,19 +292,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp)
 	 * This is a quick hack that will be removed soon, once we remove
 	 * the tmp breakpoints from ptrace
 	 */
-	if (!bp->attr.disabled || bp->callback == perf_bp_event)
+	if (!bp->attr.disabled || !bp->overflow_handler)
 		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
 	return ret;
 }
 
-int register_perf_hw_breakpoint(struct perf_event *bp)
-{
-	bp->callback = perf_bp_event;
-
-	return __register_perf_hw_breakpoint(bp);
-}
-
 /**
  * register_user_hw_breakpoint - register a hardware breakpoint for user space
  * @attr: breakpoint attributes
@@ -297,7 +306,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
  */
 struct perf_event *
 register_user_hw_breakpoint(struct perf_event_attr *attr,
-			    perf_callback_t triggered,
+			    perf_overflow_handler_t triggered,
 			    struct task_struct *tsk)
 {
 	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
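
A hedged usage sketch for the new signature (kernel-side C against this era's API; ptrace_triggered, attach_bp and the SIGTRAP comment are illustrative names, while hw_breakpoint_init(), the HW_BREAKPOINT_* constants and the four-argument handler type are believed to exist in this tree):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

/* Illustrative overflow handler, matching perf_overflow_handler_t here. */
static void ptrace_triggered(struct perf_event *bp, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* e.g. queue a SIGTRAP for the task that hit the watchpoint */
}

static struct perf_event *attach_bp(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
}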
@@ -311,19 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
  * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
  */
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
-			  perf_callback_t triggered,
-			  struct task_struct *tsk)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-	/*
-	 * FIXME: do it without unregistering
-	 * - We don't want to lose our slot
-	 * - If the new bp is incorrect, don't lose the older one
-	 */
-	unregister_hw_breakpoint(bp);
+	u64 old_addr = bp->attr.bp_addr;
+	int old_type = bp->attr.bp_type;
+	int old_len = bp->attr.bp_len;
+	int err = 0;
 
-	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+	perf_event_disable(bp);
+
+	bp->attr.bp_addr = attr->bp_addr;
+	bp->attr.bp_type = attr->bp_type;
+	bp->attr.bp_len = attr->bp_len;
+
+	if (attr->disabled)
+		goto end;
+
+	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+	if (!err)
+		perf_event_enable(bp);
+
+	if (err) {
+		bp->attr.bp_addr = old_addr;
+		bp->attr.bp_type = old_type;
+		bp->attr.bp_len = old_len;
+		if (!bp->attr.disabled)
+			perf_event_enable(bp);
+
+		return err;
+	}
+
+end:
+	bp->attr.disabled = attr->disabled;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
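This rewrite turns modify_user_hw_breakpoint() from "unregister and recreate" into an in-place update that keeps the slot and restores the old addr/type/len if validation fails. A hedged caller sketch (move_watchpoint is an invented name):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Illustrative caller: retarget an existing watchpoint without ever
 * giving up its debug-register slot. */
static int move_watchpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;	/* start from current attrs */

	attr.bp_addr = new_addr;

	/* On error the event keeps its old addr/type/len and, if it was
	 * enabled, is re-enabled; either way no slot is lost. */
	return modify_user_hw_breakpoint(bp, &attr);
}
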
@@ -348,7 +378,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
 */
 struct perf_event **
 register_wide_hw_breakpoint(struct perf_event_attr *attr,
-			    perf_callback_t triggered)
+			    perf_overflow_handler_t triggered)
 {
 	struct perf_event **cpu_events, **pevent, *bp;
 	long err;
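
register_wide_hw_breakpoint() takes the same handler type for its per-cpu events. A hedged module-style sketch, loosely modeled on samples/hw_breakpoint/data_breakpoint.c of this era (the watched symbol and all function names here are invented for the example):

#include <linux/module.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/kallsyms.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static struct perf_event **wide_bp;

static void wide_triggered(struct perf_event *bp, int nmi,
			   struct perf_sample_data *data,
			   struct pt_regs *regs)
{
	printk(KERN_INFO "wide breakpoint hit on cpu %d\n", smp_processor_id());
}

static int __init wide_bp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("jiffies");	/* illustrative target */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	wide_bp = register_wide_hw_breakpoint(&attr, wide_triggered);
	if (IS_ERR(wide_bp))
		return PTR_ERR(wide_bp);
	return 0;
}

static void __exit wide_bp_exit(void)
{
	unregister_wide_hw_breakpoint(wide_bp);
}

module_init(wide_bp_init);
module_exit(wide_bp_exit);
MODULE_LICENSE("GPL");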