| field | value | date |
|---|---|---|
| author | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2010-12-13 10:53:31 -0500 |
| committer | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2010-12-13 10:53:31 -0500 |
| commit | 474b9c86b0c65e9ca6a77d8b7bf132c4d5993b9c (patch) | |
| tree | 9ce213f7a268d13f8871b84f1d22c2b9ff55afcf /kernel/perf_event.c | |
| parent | 49db7e7b995f5c61c5e24198f833ed01d99f5e7d (diff) | |
| parent | fdea0571ddca8e3f22448f66d72a034575abea28 (diff) | |
Merge branch 'topic/asoc' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6 into for-2.6.38
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c | 93
1 file changed, 77 insertions(+), 16 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index cb6c0d2af68f..eac7e3364335 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
        int ctxn;
 
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
        for_each_task_context_nr(ctxn)
                perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
        raw_spin_lock(&ctx->lock);
 
-       /* Rotate the first entry last of non-pinned groups */
-       list_rotate_left(&ctx->flexible_groups);
+       /*
+        * Rotate the first entry last of non-pinned groups. Rotation might be
+        * disabled by the inheritance code.
+        */
+       if (!ctx->rotate_disable)
+               list_rotate_left(&ctx->flexible_groups);
 
        raw_spin_unlock(&ctx->lock);
 }
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
        raw_spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
 
-       mutex_lock(&event->owner->perf_event_mutex);
-       list_del_init(&event->owner_entry);
-       mutex_unlock(&event->owner->perf_event_mutex);
-       put_task_struct(event->owner);
-
        free_event(event);
 
        return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
        struct perf_event *event = file->private_data;
+       struct task_struct *owner;
 
        file->private_data = NULL;
 
+       rcu_read_lock();
+       owner = ACCESS_ONCE(event->owner);
+       /*
+        * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+        * !owner it means the list deletion is complete and we can indeed
+        * free this event, otherwise we need to serialize on
+        * owner->perf_event_mutex.
+        */
+       smp_read_barrier_depends();
+       if (owner) {
+               /*
+                * Since delayed_put_task_struct() also drops the last
+                * task reference we can safely take a new reference
+                * while holding the rcu_read_lock().
+                */
+               get_task_struct(owner);
+       }
+       rcu_read_unlock();
+
+       if (owner) {
+               mutex_lock(&owner->perf_event_mutex);
+               /*
+                * We have to re-check the event->owner field, if it is cleared
+                * we raced with perf_event_exit_task(), acquiring the mutex
+                * ensured they're done, and we can proceed with freeing the
+                * event.
+                */
+               if (event->owner)
+                       list_del_init(&event->owner_entry);
+               mutex_unlock(&owner->perf_event_mutex);
+               put_task_struct(owner);
+       }
+
        return perf_event_release_kernel(event);
 }
 
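[Editor's note] The perf_release() hunk above is the reader side of a lock-free handoff: snapshot event->owner under RCU, pin the task, then re-check the field under the owner's mutex before unlinking. Below is a minimal userspace C11 sketch of that shape, not kernel code: struct owner, struct event, and event_release() are invented names, the refcount stands in for the task_struct reference, and the sketch assumes the owner object stays allocated across the snapshot, a guarantee the kernel gets from RCU plus delayed_put_task_struct() and which plain userspace code would need RCU-like deferral to reproduce.

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdlib.h>

        struct owner {
                pthread_mutex_t lock;          /* models owner->perf_event_mutex */
                atomic_int refs;               /* models the task_struct refcount */
        };

        struct event {
                _Atomic(struct owner *) owner; /* models event->owner */
                int on_list;                   /* models owner_entry being linked */
        };

        static void owner_put(struct owner *o)
        {
                if (atomic_fetch_sub(&o->refs, 1) == 1)
                        free(o);
        }

        /* Models perf_release(): snapshot, pin, then serialize on the mutex. */
        static void event_release(struct event *ev)
        {
                /* Acquire load pairs with the release store on the exit side. */
                struct owner *o = atomic_load_explicit(&ev->owner,
                                                       memory_order_acquire);

                if (o) {
                        atomic_fetch_add(&o->refs, 1); /* models get_task_struct() */
                        pthread_mutex_lock(&o->lock);
                        /* Re-check: NULL means the exit path already unlinked. */
                        if (atomic_load_explicit(&ev->owner, memory_order_relaxed))
                                ev->on_list = 0;       /* models list_del_init() */
                        pthread_mutex_unlock(&o->lock);
                        owner_put(o);
                }
                /* ... the event itself would be freed here ... */
        }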
@@ -5677,7 +5709,7 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_unlock(&ctx->mutex);
 
        event->owner = current;
-       get_task_struct(current);
+
        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
@@ -5745,12 +5777,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
 
-       event->owner = current;
-       get_task_struct(current);
-       mutex_lock(&current->perf_event_mutex);
-       list_add_tail(&event->owner_entry, &current->perf_event_list);
-       mutex_unlock(&current->perf_event_mutex);
-
        return event;
 
 err_free:
@@ -5901,8 +5927,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+       struct perf_event *event, *tmp;
        int ctxn;
 
+       mutex_lock(&child->perf_event_mutex);
+       list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+                                owner_entry) {
+               list_del_init(&event->owner_entry);
+
+               /*
+                * Ensure the list deletion is visible before we clear
+                * the owner, closes a race against perf_release() where
+                * we need to serialize on the owner->perf_event_mutex.
+                */
+               smp_wmb();
+               event->owner = NULL;
+       }
+       mutex_unlock(&child->perf_event_mutex);
+
        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);
 }
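[Editor's note] This hunk is the write side of the handoff: unlink first, then clear the owner pointer, with smp_wmb() ordering the two so a releaser that reads NULL is guaranteed the unlink already happened. Continuing the userspace model from the note after the perf_release() hunk, a release store plays the role of the list_del_init(); smp_wmb(); event->owner = NULL sequence; owner_exit() is an invented name.

        /* Models perf_event_exit_task() for one event of the exiting owner. */
        static void owner_exit(struct owner *o, struct event *ev)
        {
                pthread_mutex_lock(&o->lock);  /* models child->perf_event_mutex */
                ev->on_list = 0;               /* models list_del_init() */
                /* Release store: a reader that sees NULL also sees the unlink. */
                atomic_store_explicit(&ev->owner, NULL, memory_order_release);
                pthread_mutex_unlock(&o->lock);
                owner_put(o);                  /* drop the owner's own reference */
        }

With this ordering, event_release() either observes NULL (unlink already complete, safe to free) or wins a non-NULL snapshot and serializes behind the mutex, which is exactly the invariant the kernel comments in both hunks describe.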
@@ -6122,6 +6164,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
+       unsigned long flags;
        int ret = 0;
 
        child->perf_event_ctxp[ctxn] = NULL;
@@ -6162,6 +6205,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                break;
        }
 
+       /*
+        * We can't hold ctx->lock when iterating the ->flexible_group list due
+        * to allocations, but we need to prevent rotation because
+        * rotate_ctx() will change the list from interrupt context.
+        */
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 1;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
@@ -6169,6 +6221,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                break;
        }
 
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 0;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        child_ctx = child->perf_event_ctxp[ctxn];
 
        if (child_ctx && inherited_all) {
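[Editor's note] The two hunks above set and clear the rotate_disable flag that the rotate_ctx() hunk earlier checks: the inheritance walk cannot hold ctx->lock across allocations that may sleep, so it parks rotation behind a flag instead. A minimal userspace sketch of that handshake follows; the names (struct ctx, rotate(), inherit_walk()) are illustrative, not kernel APIs, and a pthread mutex stands in for the raw spinlock.

        #include <pthread.h>
        #include <stdbool.h>

        struct ctx {
                pthread_mutex_t lock;  /* stands in for the ctx->lock spinlock */
                bool rotate_disable;   /* stands in for ctx->rotate_disable */
                /* ... the flexible_groups list would live here ... */
        };

        /* Models rotate_ctx(): runs from the timer side, skips while parked. */
        static void rotate(struct ctx *c)
        {
                pthread_mutex_lock(&c->lock);
                if (!c->rotate_disable) {
                        /* ... list_rotate_left(&c->flexible_groups) equivalent ... */
                }
                pthread_mutex_unlock(&c->lock);
        }

        /* Models the inheritance path: pause rotation around a sleeping walk. */
        static void inherit_walk(struct ctx *parent)
        {
                pthread_mutex_lock(&parent->lock);
                parent->rotate_disable = true;
                pthread_mutex_unlock(&parent->lock);

                /* ... iterate the list; allocations that may sleep go here ... */

                pthread_mutex_lock(&parent->lock);
                parent->rotate_disable = false;
                pthread_mutex_unlock(&parent->lock);
        }

Note the flag is only ever read and written under the lock, so the rotation path never observes a half-finished walk even though the walk itself runs unlocked.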
@@ -6321,6 +6377,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+       int ret;
+
        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
        perf_pmu_register(&perf_swevent);
@@ -6328,4 +6386,7 @@ void __init perf_event_init(void)
        perf_pmu_register(&perf_task_clock);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
+
+       ret = init_hw_breakpoint();
+       WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
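[Editor's note] The final hunk moves hw_breakpoint initialization here and only warns on failure rather than aborting: breakpoint support is survivable to lose. A tiny userspace sketch of that "complain but continue" pattern, with init_subsystem() as a made-up stand-in for init_hw_breakpoint():

        #include <stdio.h>

        /* Made-up stand-in for init_hw_breakpoint(); returns 0 or -errno. */
        static int init_subsystem(void)
        {
                return 0;
        }

        /* Models the WARN(ret, ...) usage: report loudly, keep initializing. */
        static void late_init(void)
        {
                int ret = init_subsystem();

                if (ret)
                        fprintf(stderr, "subsystem init failed with: %d\n", ret);
                /* initialization continues regardless of ret */
        }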
