Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	136
1 file changed, 133 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3256e36ad251..3852e2656bb0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -29,6 +29,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1725,6 +1726,26 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+int perf_event_release_kernel(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	perf_event_remove_from_context(event);
+	mutex_unlock(&ctx->mutex);
+
+	mutex_lock(&event->owner->perf_event_mutex);
+	list_del_init(&event->owner_entry);
+	mutex_unlock(&event->owner->perf_event_mutex);
+	put_task_struct(event->owner);
+
+	free_event(event);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+
 static int perf_event_read_size(struct perf_event *event)
 {
 	int entry = sizeof(u64); /* value */
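perf_event_release_kernel() gives in-kernel users the counterpart of the fd-based perf_release() above: it detaches the event from its context, drops it from the owner's list, and frees it. A minimal teardown sketch, assuming the event was obtained from perf_event_create_kernel_counter() (added later in this patch); the helper and module state are hypothetical:

static struct perf_event *my_event;	/* hypothetical module state */

static void my_counter_teardown(void)	/* hypothetical helper */
{
	if (my_event)
		perf_event_release_kernel(my_event);
}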
@@ -1750,7 +1771,7 @@ static int perf_event_read_size(struct perf_event *event)
 	return size;
 }
 
-static u64 perf_event_read_value(struct perf_event *event)
+u64 perf_event_read_value(struct perf_event *event)
 {
 	struct perf_event *child;
 	u64 total = 0;
@@ -1761,6 +1782,7 @@ static u64 perf_event_read_value(struct perf_event *event)
 
 	return total;
 }
+EXPORT_SYMBOL_GPL(perf_event_read_value);
 
 static int perf_event_read_entry(struct perf_event *event,
 				 u64 read_format, char __user *buf)
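With perf_event_read_value() made non-static and exported, kernel code can poll a counter's accumulated value (the event's own count plus that of its children). A sketch, assuming a valid event from perf_event_create_kernel_counter(); the function name is hypothetical:

static void my_counter_sample(struct perf_event *event)
{
	/* Sum of the event's count and its children's counts */
	u64 total = perf_event_read_value(event);

	pr_info("counter total: %llu\n", (unsigned long long)total);
}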
@@ -4231,6 +4253,51 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #endif /* CONFIG_EVENT_PROFILE */
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+	release_bp_slot(event);
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	int err;
+	/*
+	 * The breakpoint is already filled if we haven't created the counter
+	 * through the perf syscall.
+	 * FIXME: manage to get triggered to NULL if it comes from syscalls
+	 */
+	if (!bp->callback)
+		err = register_perf_hw_breakpoint(bp);
+	else
+		err = __register_perf_hw_breakpoint(bp);
+	if (err)
+		return ERR_PTR(err);
+
+	bp->destroy = bp_perf_event_destroy;
+
+	return &perf_ops_bp;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+	/* TODO */
+}
+#else
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	return NULL;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+}
+#endif
+
 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
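The new PERF_TYPE_BREAKPOINT path picks register_perf_hw_breakpoint() when no kernel callback is attached (i.e. the event came from the perf syscall and the arch fields still need to be filled from the attr), and __register_perf_hw_breakpoint() otherwise. A sketch of an attr describing a 4-byte write breakpoint, assuming the bp_addr/bp_type/bp_len attr fields and HW_BREAKPOINT_* constants this series introduces; the watched variable is hypothetical:

static int my_watched_var;		/* hypothetical target */

struct perf_event_attr attr = {
	.type		= PERF_TYPE_BREAKPOINT,
	.size		= sizeof(attr),
	.bp_addr	= (__u64)(unsigned long)&my_watched_var,
	.bp_type	= HW_BREAKPOINT_W,	/* assumed constant name */
	.bp_len		= HW_BREAKPOINT_LEN_4,	/* assumed constant name */
	.sample_period	= 1,
};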
@@ -4297,6 +4364,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 		   struct perf_event_context *ctx,
 		   struct perf_event *group_leader,
 		   struct perf_event *parent_event,
+		   perf_callback_t callback,
 		   gfp_t gfpflags)
 {
 	const struct pmu *pmu;
@@ -4339,6 +4407,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 
+	if (!callback && parent_event)
+		callback = parent_event->callback;
+
+	event->callback = callback;
+
 	if (attr->disabled)
 		event->state = PERF_EVENT_STATE_OFF;
 
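perf_event_alloc() now stores the callback on the event and, when none is passed, inherits the parent's, so counters cloned into child tasks keep invoking the same handler. A sample handler, assuming perf_callback_t is the two-argument typedef (event pointer plus opaque data) used by this series:

/* Hypothetical handler; signature assumes
 * typedef void (*perf_callback_t)(struct perf_event *, void *); */
static void my_bp_triggered(struct perf_event *bp, void *data)
{
	printk(KERN_INFO "breakpoint event %p fired\n", bp);
}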
@@ -4373,6 +4446,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 		pmu = tp_perf_event_init(event);
 		break;
 
+	case PERF_TYPE_BREAKPOINT:
+		pmu = bp_perf_event_init(event);
+		break;
+
+
 	default:
 		break;
 	}
@@ -4615,7 +4693,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-				 NULL, GFP_KERNEL);
+				 NULL, NULL, GFP_KERNEL);
 	err = PTR_ERR(event);
 	if (IS_ERR(event))
 		goto err_put_context;
@@ -4663,6 +4741,58 @@ err_put_context:
 	return err;
 }
 
+/**
+ * perf_event_create_kernel_counter
+ *
+ * @attr: attributes of the counter to create
+ * @cpu: cpu on which the counter is bound
+ * @pid: task to profile
+ */
+struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+				 pid_t pid, perf_callback_t callback)
+{
+	struct perf_event *event;
+	struct perf_event_context *ctx;
+	int err;
+
+	/*
+	 * Get the target context (task or percpu):
+	 */
+
+	ctx = find_get_context(pid, cpu);
+	if (IS_ERR(ctx))
+		return NULL;
+
+	event = perf_event_alloc(attr, cpu, ctx, NULL,
+				 NULL, callback, GFP_KERNEL);
+	err = PTR_ERR(event);
+	if (IS_ERR(event))
+		goto err_put_context;
+
+	event->filp = NULL;
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	perf_install_in_context(ctx, event, cpu);
+	++ctx->generation;
+	mutex_unlock(&ctx->mutex);
+
+	event->owner = current;
+	get_task_struct(current);
+	mutex_lock(&current->perf_event_mutex);
+	list_add_tail(&event->owner_entry, &current->perf_event_list);
+	mutex_unlock(&current->perf_event_mutex);
+
+	return event;
+
+err_put_context:
+	if (err < 0)
+		put_ctx(ctx);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
+
 /*
  * inherit an event from parent task to child task:
  */
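Taken together, a kernel subsystem can now drive the full counter lifecycle without a file descriptor. An end-to-end sketch, assuming pid == -1 binds the counter to the CPU rather than a task, and reusing the hypothetical names from the earlier sketches:

static int my_counter_setup(int cpu)
{
	struct perf_event_attr attr = { };	/* fill as in the breakpoint sketch */

	my_event = perf_event_create_kernel_counter(&attr, cpu, -1,
						    my_bp_triggered);
	if (!my_event)		/* NULL on failure, not an ERR_PTR */
		return -EINVAL;

	return 0;
}

Teardown then goes through perf_event_release_kernel(), as sketched near the top of this patch.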
@@ -4688,7 +4818,7 @@ inherit_event(struct perf_event *parent_event,
 	child_event = perf_event_alloc(&parent_event->attr,
 					   parent_event->cpu, child_ctx,
 					   group_leader, parent_event,
-					   GFP_KERNEL);
+					   NULL, GFP_KERNEL);
 	if (IS_ERR(child_event))
 		return child_event;
 	get_ctx(child_ctx);