author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-11 23:47:30 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-11 23:47:30 -0500
commit	6f696eb17be741668810fe1f798135c7cf6733e2 (patch)
tree	f9bcfe5831dfcaaad50ca68d7f04d80d8236fa56 /kernel
parent	c4e194e3b71ff4fed01d727c32ee1071921d28a3 (diff)
parent	125580380f418000b1a06d9a54700f1191b6e561 (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (57 commits)
x86, perf events: Check if we have APIC enabled
perf_event: Fix variable initialization in other codepaths
perf kmem: Fix unused argument build warning
perf symbols: perf_header__read_build_ids() offset'n'size should be u64
perf symbols: dsos__read_build_ids() should read both user and kernel buildids
perf tools: Align long options which have no short forms
perf kmem: Show usage if no option is specified
sched: Mark sched_clock() as notrace
perf sched: Add max delay time snapshot
perf tools: Correct size given to memset
perf_event: Fix perf_swevent_hrtimer() variable initialization
perf sched: Fix for getting task's execution time
tracing/kprobes: Fix field creation's bad error handling
perf_event: Cleanup for cpu_clock_perf_event_update()
perf_event: Allocate children's perf_event_ctxp at the right time
perf_event: Clean up __perf_event_init_context()
hw-breakpoints: Modify breakpoints without unregistering them
perf probe: Update perf-probe document
perf probe: Support --del option
trace-kprobe: Support delete probe syntax
...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/hw_breakpoint.c	146
-rw-r--r--	kernel/perf_event.c	75
-rw-r--r--	kernel/trace/trace_kprobe.c	41
-rw-r--r--	kernel/trace/trace_ksym.c	5
4 files changed, 159 insertions, 108 deletions
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index cf5ee1628411..366eedf949c0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
 static unsigned int max_task_bp_pinned(int cpu)
 {
 	int i;
-	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 
 	for (i = HBP_NUM -1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+	struct perf_event_context *ctx = tsk->perf_event_ctxp;
+	struct list_head *list;
+	struct perf_event *bp;
+	unsigned long flags;
+	int count = 0;
+
+	if (WARN_ONCE(!ctx, "No perf context for this task"))
+		return 0;
+
+	list = &ctx->event_list;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	/*
+	 * The current breakpoint counter is not included in the list
+	 * at the open() callback time
+	 */
+	list_for_each_entry(bp, list, event_entry) {
+		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+			count++;
+	}
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+	int cpu = bp->cpu;
+	struct task_struct *tsk = bp->ctx->task;
+
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-		slots->pinned += max_task_bp_pinned(cpu);
+		if (!tsk)
+			slots->pinned += max_task_bp_pinned(cpu);
+		else
+			slots->pinned += task_bp_pinned(tsk);
 		slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
 		return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned, cpu);
-		nr += max_task_bp_pinned(cpu);
+		if (!tsk)
+			nr += max_task_bp_pinned(cpu);
+		else
+			nr += task_bp_pinned(tsk);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -118,35 +157,12 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-	int count = 0;
-	struct perf_event *bp;
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	unsigned int *tsk_pinned;
-	struct list_head *list;
-	unsigned long flags;
-
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return;
-
-	list = &ctx->event_list;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
-	}
+	int count = 0;
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	count = task_bp_pinned(tsk);
 
-	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-		return;
-
-	tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 	if (enable) {
 		tsk_pinned[count]++;
 		if (count > 0)
@@ -193,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   - If attached to a single cpu, check:
  *
  *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *   -> If there are already non-pinned counters in this cpu, it means
 *      there is already a free slot for them.
@@ -204,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 *   - If attached to every cpus, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *   -> This is roughly the same, except we check the number of per cpu
 *      bp for every cpu and we keep the max one. Same for the per tasks
@@ -216,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *   -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *      one register at least (or they will never be fed).
@@ -224,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 *   - If attached to every cpus, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
 int reserve_bp_slot(struct perf_event *bp)
 {
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
 	mutex_lock(&nr_bp_mutex);
 
-	fetch_bp_busy_slots(&slots, bp->cpu);
+	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
@@ -259,7 +275,7 @@ void release_bp_slot(struct perf_event *bp)
 }
 
 
-int __register_perf_hw_breakpoint(struct perf_event *bp)
+int register_perf_hw_breakpoint(struct perf_event *bp)
 {
 	int ret;
 
@@ -276,19 +292,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp)
 	 * This is a quick hack that will be removed soon, once we remove
 	 * the tmp breakpoints from ptrace
 	 */
-	if (!bp->attr.disabled || bp->callback == perf_bp_event)
+	if (!bp->attr.disabled || !bp->overflow_handler)
 		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
 	return ret;
 }
 
-int register_perf_hw_breakpoint(struct perf_event *bp)
-{
-	bp->callback = perf_bp_event;
-
-	return __register_perf_hw_breakpoint(bp);
-}
-
 /**
  * register_user_hw_breakpoint - register a hardware breakpoint for user space
  * @attr: breakpoint attributes
@@ -297,7 +306,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
  */
 struct perf_event *
 register_user_hw_breakpoint(struct perf_event_attr *attr,
-			    perf_callback_t triggered,
+			    perf_overflow_handler_t triggered,
 			    struct task_struct *tsk)
 {
 	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
@@ -311,19 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
-			  perf_callback_t triggered,
-			  struct task_struct *tsk)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-	/*
-	 * FIXME: do it without unregistering
-	 * - We don't want to lose our slot
-	 * - If the new bp is incorrect, don't lose the older one
-	 */
-	unregister_hw_breakpoint(bp);
+	u64 old_addr = bp->attr.bp_addr;
+	int old_type = bp->attr.bp_type;
+	int old_len = bp->attr.bp_len;
+	int err = 0;
 
-	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+	perf_event_disable(bp);
+
+	bp->attr.bp_addr = attr->bp_addr;
+	bp->attr.bp_type = attr->bp_type;
+	bp->attr.bp_len = attr->bp_len;
+
+	if (attr->disabled)
+		goto end;
+
+	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+	if (!err)
+		perf_event_enable(bp);
+
+	if (err) {
+		bp->attr.bp_addr = old_addr;
+		bp->attr.bp_type = old_type;
+		bp->attr.bp_len = old_len;
+		if (!bp->attr.disabled)
+			perf_event_enable(bp);
+
+		return err;
+	}
+
+end:
+	bp->attr.disabled = attr->disabled;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
@@ -348,7 +378,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
 */
 struct perf_event **
 register_wide_hw_breakpoint(struct perf_event_attr *attr,
-			    perf_callback_t triggered)
+			    perf_overflow_handler_t triggered)
 {
 	struct perf_event **cpu_events, **pevent, *bp;
 	long err;
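The reworked modify_user_hw_breakpoint() above updates an existing breakpoint in place: the event is disabled, the new address/type/length are written and validated, and on failure the old settings are restored, so the slot is never given up the way it was with the previous unregister + re-register cycle. A rough caller-side sketch, not part of this patch (the helper name is made up; the HW_BREAKPOINT_* constants are the usual ones from <linux/hw_breakpoint.h>):

/* Hypothetical helper, for illustration only. */
static int move_watchpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;	/* start from the current settings */

	attr.bp_addr  = new_addr;		/* watch the new address... */
	attr.bp_type  = HW_BREAKPOINT_W;	/* ...for write access... */
	attr.bp_len   = HW_BREAKPOINT_LEN_4;	/* ...4 bytes wide */
	attr.disabled = 0;

	/* Returns 0 on success; on error the breakpoint keeps its old settings. */
	return modify_user_hw_breakpoint(bp, &attr);
}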
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40a996ec39fa..e73e53c7582f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -36,7 +36,7 @@
 /*
  * Each CPU has a list of per CPU events:
  */
-DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
+static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
 
 int perf_max_events __read_mostly = 1;
 static int perf_reserved_percpu __read_mostly;
@@ -567,7 +567,7 @@ static void __perf_event_disable(void *info)
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
-static void perf_event_disable(struct perf_event *event)
+void perf_event_disable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
@@ -971,7 +971,7 @@ static void __perf_event_enable(void *info)
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
-static void perf_event_enable(struct perf_event *event)
+void perf_event_enable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
@@ -1579,7 +1579,6 @@ static void
 __perf_event_init_context(struct perf_event_context *ctx,
 			  struct task_struct *task)
 {
-	memset(ctx, 0, sizeof(*ctx));
 	spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->group_list);
@@ -1654,7 +1653,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	}
 
 	if (!ctx) {
-		ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+		ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
 		err = -ENOMEM;
 		if (!ctx)
 			goto errout;
@@ -4011,6 +4010,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	event->pmu->read(event);
 
 	data.addr = 0;
+	data.raw = NULL;
 	data.period = event->hw.last_period;
 	regs = get_irq_regs();
 	/*
@@ -4080,8 +4080,7 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
 	u64 now;
 
 	now = cpu_clock(cpu);
-	prev = atomic64_read(&event->hw.prev_count);
-	atomic64_set(&event->hw.prev_count, now);
+	prev = atomic64_xchg(&event->hw.prev_count, now);
 	atomic64_add(now - prev, &event->count);
 }
 
@@ -4286,15 +4285,8 @@ static void bp_perf_event_destroy(struct perf_event *event)
 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
 {
 	int err;
-	/*
-	 * The breakpoint is already filled if we haven't created the counter
-	 * through perf syscall
-	 * FIXME: manage to get trigerred to NULL if it comes from syscalls
-	 */
-	if (!bp->callback)
-		err = register_perf_hw_breakpoint(bp);
-	else
-		err = __register_perf_hw_breakpoint(bp);
+
+	err = register_perf_hw_breakpoint(bp);
 	if (err)
 		return ERR_PTR(err);
 
@@ -4308,6 +4300,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
 	struct perf_sample_data sample;
 	struct pt_regs *regs = data;
 
+	sample.raw = NULL;
 	sample.addr = bp->attr.bp_addr;
 
 	if (!perf_exclude_event(bp, regs))
@@ -4390,7 +4383,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 		   struct perf_event_context *ctx,
 		   struct perf_event *group_leader,
 		   struct perf_event *parent_event,
-		   perf_callback_t callback,
+		   perf_overflow_handler_t overflow_handler,
 		   gfp_t gfpflags)
 {
 	const struct pmu *pmu;
@@ -4433,10 +4426,10 @@ perf_event_alloc(struct perf_event_attr *attr,
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 
-	if (!callback && parent_event)
-		callback = parent_event->callback;
+	if (!overflow_handler && parent_event)
+		overflow_handler = parent_event->overflow_handler;
 
-	event->callback = callback;
+	event->overflow_handler = overflow_handler;
 
 	if (attr->disabled)
 		event->state = PERF_EVENT_STATE_OFF;
@@ -4776,7 +4769,8 @@ err_put_context:
 */
 struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-				 pid_t pid, perf_callback_t callback)
+				 pid_t pid,
+				 perf_overflow_handler_t overflow_handler)
 {
 	struct perf_event *event;
 	struct perf_event_context *ctx;
@@ -4793,7 +4787,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	}
 
 	event = perf_event_alloc(attr, cpu, ctx, NULL,
-				 NULL, callback, GFP_KERNEL);
+				 NULL, overflow_handler, GFP_KERNEL);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
 		goto err_put_context;
@@ -5090,7 +5084,7 @@ again:
 */
 int perf_event_init_task(struct task_struct *child)
 {
-	struct perf_event_context *child_ctx, *parent_ctx;
+	struct perf_event_context *child_ctx = NULL, *parent_ctx;
 	struct perf_event_context *cloned_ctx;
 	struct perf_event *event;
 	struct task_struct *parent = current;
@@ -5106,20 +5100,6 @@ int perf_event_init_task(struct task_struct *child)
 		return 0;
 
 	/*
-	 * This is executed from the parent task context, so inherit
-	 * events that have been marked for cloning.
-	 * First allocate and initialize a context for the child.
-	 */
-
-	child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
-	if (!child_ctx)
-		return -ENOMEM;
-
-	__perf_event_init_context(child_ctx, child);
-	child->perf_event_ctxp = child_ctx;
-	get_task_struct(child);
-
-	/*
 	 * If the parent's context is a clone, pin it so it won't get
 	 * swapped under us.
 	 */
@@ -5149,6 +5129,26 @@ int perf_event_init_task(struct task_struct *child)
 			continue;
 		}
 
+		if (!child->perf_event_ctxp) {
+			/*
+			 * This is executed from the parent task context, so
+			 * inherit events that have been marked for cloning.
+			 * First allocate and initialize a context for the
+			 * child.
+			 */
+
+			child_ctx = kzalloc(sizeof(struct perf_event_context),
+					    GFP_KERNEL);
+			if (!child_ctx) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+
+			__perf_event_init_context(child_ctx, child);
+			child->perf_event_ctxp = child_ctx;
+			get_task_struct(child);
+		}
+
 		ret = inherit_group(event, parent, parent_ctx,
 				    child, child_ctx);
 		if (ret) {
@@ -5177,6 +5177,7 @@ int perf_event_init_task(struct task_struct *child)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
+exit:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
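One small pattern worth noting from the cpu_clock_perf_event_update() hunk above: a separate atomic64_read()/atomic64_set() pair leaves a window in which another update of hw.prev_count can slip in between the read and the store, whereas atomic64_xchg() fetches the old value and installs the new one in a single atomic operation. A simplified restatement of the resulting pattern (a sketch, not the full function):

/* Simplified sketch of the read-modify-write pattern adopted above. */
static void clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;

	/* Atomically grab the previous timestamp and store the new one. */
	prev = atomic64_xchg(&event->hw.prev_count, now);
	/* Accumulate the elapsed time since the last update. */
	atomic64_add(now - prev, &event->count);
}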
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index aff5f80b59b8..b52d397e57eb 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -606,23 +606,22 @@ static int create_trace_probe(int argc, char **argv)
 	 */
 	struct trace_probe *tp;
 	int i, ret = 0;
-	int is_return = 0;
+	int is_return = 0, is_delete = 0;
 	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
 	unsigned long offset = 0;
 	void *addr = NULL;
 	char buf[MAX_EVENT_NAME_LEN];
 
-	if (argc < 2) {
-		pr_info("Probe point is not specified.\n");
-		return -EINVAL;
-	}
-
+	/* argc must be >= 1 */
 	if (argv[0][0] == 'p')
 		is_return = 0;
 	else if (argv[0][0] == 'r')
 		is_return = 1;
+	else if (argv[0][0] == '-')
+		is_delete = 1;
 	else {
-		pr_info("Probe definition must be started with 'p' or 'r'.\n");
+		pr_info("Probe definition must be started with 'p', 'r' or"
+			" '-'.\n");
 		return -EINVAL;
 	}
 
@@ -642,7 +641,29 @@ static int create_trace_probe(int argc, char **argv)
 			return -EINVAL;
 		}
 	}
+	if (!group)
+		group = KPROBE_EVENT_SYSTEM;
 
+	if (is_delete) {
+		if (!event) {
+			pr_info("Delete command needs an event name.\n");
+			return -EINVAL;
+		}
+		tp = find_probe_event(event, group);
+		if (!tp) {
+			pr_info("Event %s/%s doesn't exist.\n", group, event);
+			return -ENOENT;
+		}
+		/* delete an event */
+		unregister_trace_probe(tp);
+		free_trace_probe(tp);
+		return 0;
+	}
+
+	if (argc < 2) {
+		pr_info("Probe point is not specified.\n");
+		return -EINVAL;
+	}
 	if (isdigit(argv[1][0])) {
 		if (is_return) {
 			pr_info("Return probe point must be a symbol.\n");
@@ -671,8 +692,6 @@ static int create_trace_probe(int argc, char **argv)
 	argc -= 2; argv += 2;
 
 	/* setup a probe */
-	if (!group)
-		group = KPROBE_EVENT_SYSTEM;
 	if (!event) {
 		/* Make a new event name */
 		if (symbol)
@@ -1114,7 +1133,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
 
 	ret = trace_define_common_fields(event_call);
-	if (!ret)
+	if (ret)
 		return ret;
 
 	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
@@ -1132,7 +1151,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
 
 	ret = trace_define_common_fields(event_call);
-	if (!ret)
+	if (ret)
 		return ret;
 
 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
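The create_trace_probe() changes above add a '-' command, so a single probe can now be removed through the kprobe_events control file by name ('-:event' or '-:group/event') instead of clearing the whole file. A hedged userspace sketch of issuing such a command (the helper is illustrative and not part of this patch; the path assumes debugfs is mounted at /sys/kernel/debug):

/* Illustrative userspace helper, not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int delete_kprobe_event(const char *event)
{
	const char *path = "/sys/kernel/debug/tracing/kprobe_events";
	char cmd[256];
	int fd, ret;

	/* "-:myprobe" (or "-:group/myprobe") asks the kernel to drop that probe. */
	snprintf(cmd, sizeof(cmd), "-:%s\n", event);

	fd = open(path, O_WRONLY | O_APPEND);
	if (fd < 0)
		return -1;
	ret = (write(fd, cmd, strlen(cmd)) < 0) ? -1 : 0;
	close(fd);
	return ret;
}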
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index ddfa0fd43bc0..acb87d4a4ac1 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -79,11 +79,12 @@ void ksym_collect_stats(unsigned long hbp_hit_addr)
 }
 #endif /* CONFIG_PROFILE_KSYM_TRACER */
 
-void ksym_hbp_handler(struct perf_event *hbp, void *data)
+void ksym_hbp_handler(struct perf_event *hbp, int nmi,
+		      struct perf_sample_data *data,
+		      struct pt_regs *regs)
 {
 	struct ring_buffer_event *event;
 	struct ksym_trace_entry *entry;
-	struct pt_regs *regs = data;
 	struct ring_buffer *buffer;
 	int pc;
 
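The ksym_hbp_handler() change above shows the new perf_overflow_handler_t shape used throughout this merge: breakpoint and counter callbacks now receive the event, an NMI flag, the decoded sample data and the register file directly, instead of an opaque void *data. A hedged sketch of a kernel-side user registering a breakpoint with such a handler (the names and the watched variable are made up; register_user_hw_breakpoint() and the attr fields are the ones visible in the kernel/hw_breakpoint.c diff above):

/* Illustrative sketch, not from this patch. */
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/ptrace.h>
#include <linux/err.h>

static int watched_value;		/* hypothetical variable to watch */
static struct perf_event *sample_wp;

static void sample_wp_handler(struct perf_event *bp, int nmi,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	/* Runs each time the watched location is written. */
	pr_info("watchpoint 0x%llx hit, ip=%lx\n",
		(unsigned long long)bp->attr.bp_addr,
		instruction_pointer(regs));
}

static int __init sample_wp_init(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_BREAKPOINT,
		.size		= sizeof(attr),
		.bp_addr	= (unsigned long)&watched_value,
		.bp_type	= HW_BREAKPOINT_W,
		.bp_len		= HW_BREAKPOINT_LEN_4,
		.sample_period	= 1,	/* fire the callback on every hit */
	};

	sample_wp = register_user_hw_breakpoint(&attr, sample_wp_handler, current);
	return IS_ERR(sample_wp) ? PTR_ERR(sample_wp) : 0;
}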