Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 133 ++++++++++++++++++++-----------------------
 1 file changed, 59 insertions(+), 74 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a661e7991865..574ee58a3046 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -56,21 +56,6 @@ static atomic_t nr_task_events __read_mostly;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-static inline bool perf_paranoid_tracepoint_raw(void)
-{
-	return sysctl_perf_event_paranoid > -1;
-}
-
-static inline bool perf_paranoid_cpu(void)
-{
-	return sysctl_perf_event_paranoid > 0;
-}
-
-static inline bool perf_paranoid_kernel(void)
-{
-	return sysctl_perf_event_paranoid > 1;
-}
-
 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
@@ -96,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
-void __weak hw_perf_event_setup(int cpu)		{ barrier(); }
-void __weak hw_perf_event_setup_online(int cpu)		{ barrier(); }
-void __weak hw_perf_event_setup_offline(int cpu)	{ barrier(); }
-
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
@@ -112,25 +93,15 @@ void __weak perf_event_print_debug(void) { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
-void __perf_disable(void)
-{
-	__get_cpu_var(perf_disable_count)++;
-}
-
-bool __perf_enable(void)
-{
-	return !--__get_cpu_var(perf_disable_count);
-}
-
 void perf_disable(void)
 {
-	__perf_disable();
-	hw_perf_disable();
+	if (!__get_cpu_var(perf_disable_count)++)
+		hw_perf_disable();
 }
 
 void perf_enable(void)
 {
-	if (__perf_enable())
+	if (!--__get_cpu_var(perf_disable_count))
 		hw_perf_enable();
 }
 
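
Note on the fold above: __perf_disable()/__perf_enable() were only ever called from perf_disable()/perf_enable(), so the counter manipulation is inlined. The per-CPU counter is what gives the pair nesting semantics: only the outermost disable and the last matching enable actually touch the hardware. A minimal standalone sketch of that pattern (plain C, one counter standing in for the per-CPU variable, function names hypothetical):

    #include <stdio.h>

    static int disable_count;   /* stand-in for __get_cpu_var(perf_disable_count) */

    static void hw_disable(void) { puts("hw disabled"); }
    static void hw_enable(void)  { puts("hw enabled"); }

    static void demo_disable(void)
    {
        if (!disable_count++)   /* only the outermost call reaches the hardware */
            hw_disable();
    }

    static void demo_enable(void)
    {
        if (!--disable_count)   /* only the last matching call re-enables */
            hw_enable();
    }

    int main(void)
    {
        demo_disable();         /* prints "hw disabled" */
        demo_disable();         /* nested: no output */
        demo_enable();          /* still nested: no output */
        demo_enable();          /* prints "hw enabled" */
        return 0;
    }
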
@@ -1553,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
+			perf_disable();
 			event->pmu->unthrottle(event);
+			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		perf_disable();
 		event->pmu->read(event);
 		now = atomic64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1566,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
+		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1575,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	if (!ctx->nr_events)
-		return;
-
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
@@ -1590,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	int rotate = 0;
 
 	if (!atomic_read(&nr_events))
 		return;
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
-	ctx = curr->perf_event_ctxp;
+	if (cpuctx->ctx.nr_events &&
+	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+		rotate = 1;
 
-	perf_disable();
+	ctx = curr->perf_event_ctxp;
+	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+		rotate = 1;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
+	if (!rotate)
+		return;
+
+	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1614,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
 	perf_enable();
 }
 
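
The two hunks above make the tick path cheaper: the whole rotation cycle (sched_out, rotate_ctx, sched_in, all under perf_disable()) is skipped unless some context is oversubscribed. A standalone sketch of the decision, directly mirroring the nr_events/nr_active test in the hunk (struct and function names hypothetical):

    /* Rotation only pays off when events exist that are not all active,
     * i.e. the PMU is oversubscribed and flexible groups must take turns. */
    struct ctx_counts {
        int nr_events;  /* events attached to the context */
        int nr_active;  /* events currently scheduled on the PMU */
    };

    static int needs_rotation(const struct ctx_counts *cpuctx,
                              const struct ctx_counts *taskctx)
    {
        if (cpuctx->nr_events && cpuctx->nr_events != cpuctx->nr_active)
            return 1;
        if (taskctx && taskctx->nr_events &&
            taskctx->nr_events != taskctx->nr_active)
            return 1;
        return 0;
    }

Note that rotate_ctx()'s old !ctx->nr_events early return, deleted in an earlier hunk, is subsumed by this check.
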
@@ -2610,7 +2590,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (user_locked > user_lock_limit)
 		extra = user_locked - user_lock_limit;
 
-	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
 	locked = vma->vm_mm->locked_vm + extra;
 
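
rlimit(RLIMIT_MEMLOCK) is a then-new kernel accessor wrapping the open-coded current->signal->rlim[...].rlim_cur read; behavior is unchanged. As a userspace analogue of what this code computes, a runnable sketch (getrlimit(2) in place of the in-kernel read, sysconf in place of PAGE_SHIFT):

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
        struct rlimit rl;
        long page_size = sysconf(_SC_PAGESIZE);

        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
            perror("getrlimit");
            return 1;
        }
        /* soft limit in bytes, then in pages, as lock_limit >>= PAGE_SHIFT does */
        printf("RLIMIT_MEMLOCK: %llu bytes (%llu pages)\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)(rl.rlim_cur / (rlim_t)page_size));
        return 0;
    }
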
@@ -2806,6 +2786,13 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
+#ifdef CONFIG_EVENT_TRACING
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+#endif
+
 /*
  * Output
  */
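
The stub above relies on weak linkage: an architecture that can fetch caller registers provides a strong perf_arch_fetch_caller_regs() and silently overrides this empty default. A minimal runnable demonstration of the weak-symbol mechanism itself (GCC/Clang attribute; the function name here is hypothetical):

    #include <stdio.h>

    /* Weak default: the linker discards it if any other object file
     * defines a strong symbol with the same name. */
    __attribute__((weak)) void arch_hook(void)
    {
        puts("weak default hook");
    }

    int main(void)
    {
        arch_hook();    /* weak default, unless another TU overrides it */
        return 0;
    }
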
@@ -4123,8 +4110,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 	if (rctx < 0)
 		return;
 
-	data.addr = addr;
-	data.raw = NULL;
+	perf_sample_data_init(&data, addr);
 
 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
 
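
perf_sample_data_init() centralizes the initialization each call site used to do by hand; every conversion in this diff replaces exactly the pair data.addr = ...; data.raw = NULL;. Judging by those replaced lines, the helper presumably reduces to something like the following (a sketch with stub types, not the real kernel header):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t u64;

    struct perf_raw_record;             /* opaque in this sketch */

    struct perf_sample_data {           /* only the fields this diff touches */
        u64 addr;
        struct perf_raw_record *raw;
    };

    static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
    {
        data->addr = addr;
        data->raw = NULL;   /* callers needing raw data (perf_tp_event) set it after */
    }
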
@@ -4169,11 +4155,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	struct perf_event *event;
 	u64 period;
 
 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
 	event->pmu->read(event);
 
-	data.addr = 0;
-	data.raw = NULL;
+	perf_sample_data_init(&data, 0);
 	data.period = event->hw.last_period;
 	regs = get_irq_regs();
 	/*
@@ -4335,26 +4320,20 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size)
+		   int entry_size, struct pt_regs *regs)
 {
+	struct perf_sample_data data;
 	struct perf_raw_record raw = {
 		.size = entry_size,
 		.data = record,
 	};
 
-	struct perf_sample_data data = {
-		.addr = addr,
-		.raw = &raw,
-	};
-
-	struct pt_regs *regs = get_irq_regs();
-
-	if (!regs)
-		regs = task_pt_regs(current);
+	perf_sample_data_init(&data, addr);
+	data.raw = &raw;
 
 	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			 &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
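
With regs now a parameter, the get_irq_regs()/task_pt_regs(current) fallback deleted above becomes the caller's job (the matching tracepoint-side change is outside this file). A caller-side sketch reproducing the same fallback; the declarations are stubs standing in for kernel headers, so this compiles as a translation unit but is illustrative only:

    struct pt_regs;
    struct task_struct;

    /* Stub declarations for the kernel interfaces used below. */
    extern struct pt_regs *get_irq_regs(void);      /* NULL outside IRQ context */
    extern struct task_struct *get_current(void);
    extern struct pt_regs *task_pt_regs(struct task_struct *tsk);
    extern void perf_tp_event(int event_id, unsigned long long addr,
                              unsigned long long count, void *record,
                              int entry_size, struct pt_regs *regs);

    static void emit_tp_sample(int event_id, void *record, int entry_size)
    {
        struct pt_regs *regs = get_irq_regs();

        if (!regs)                      /* same fallback the removed lines used */
            regs = task_pt_regs(get_current());
        perf_tp_event(event_id, 0, 1, record, entry_size, regs);
    }
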
@@ -4370,7 +4349,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4384,7 +4363,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
@@ -4463,8 +4442,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
 	struct perf_sample_data sample;
 	struct pt_regs *regs = data;
 
-	sample.raw = NULL;
-	sample.addr = bp->attr.bp_addr;
+	perf_sample_data_init(&sample, bp->attr.bp_addr);
 
 	if (!perf_exclude_event(bp, regs))
 		perf_swevent_add(bp, 1, 1, &sample, regs);
@@ -5392,18 +5370,26 @@ int perf_event_init_task(struct task_struct *child)
 	return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+	int cpu;
+	struct perf_cpu_context *cpuctx;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = &per_cpu(perf_cpu_context, cpu);
+		__perf_event_init_context(&cpuctx->ctx, NULL);
+	}
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
-	__perf_event_init_context(&cpuctx->ctx, NULL);
 
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
 	spin_unlock(&perf_resource_lock);
-
-	hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5443,20 +5429,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_event_init_cpu(cpu);
 		break;
 
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		hw_perf_event_setup_online(cpu);
-		break;
-
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
 
-	case CPU_DEAD:
-		hw_perf_event_setup_offline(cpu);
-		break;
-
 	default:
 		break;
 	}
@@ -5474,6 +5451,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+	perf_event_init_all_cpus();
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
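
Taken together, the last three hunks move per-CPU context initialization out of the hotplug path: perf_event_init_all_cpus() walks every possible CPU once at boot, so CPU_UP_PREPARE only handles resource accounting, and the CPU_ONLINE/CPU_DEAD cases disappear along with the hw_perf_event_setup_*() hooks. A condensed runnable sketch of that ordering (array bound and names hypothetical):

    #include <stdio.h>

    #define NR_POSSIBLE_CPUS 4      /* stand-in for for_each_possible_cpu() */

    struct cpu_context { int initialized; };
    static struct cpu_context contexts[NR_POSSIBLE_CPUS];

    static void init_all_cpus(void) /* boot: runs once, covers offline CPUs too */
    {
        for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
            contexts[cpu].initialized = 1;
    }

    static void cpu_up_prepare(int cpu) /* hotplug: context already valid */
    {
        printf("cpu %d up, context initialized: %d\n",
               cpu, contexts[cpu].initialized);
    }

    int main(void)
    {
        init_all_cpus();
        cpu_up_prepare(2);          /* prints "... initialized: 1" */
        return 0;
    }
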
@@ -5481,13 +5459,16 @@ void __init perf_event_init(void)
 	register_cpu_notifier(&perf_cpu_nb);
 }
 
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
+					struct sysdev_class_attribute *attr,
+					char *buf)
 {
 	return sprintf(buf, "%d\n", perf_reserved_percpu);
 }
 
 static ssize_t
 perf_set_reserve_percpu(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr,
 			const char *buf,
 			size_t count)
 {
@@ -5516,13 +5497,17 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	return count;
 }
 
-static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_overcommit(struct sysdev_class *class,
+				    struct sysdev_class_attribute *attr,
+				    char *buf)
 {
 	return sprintf(buf, "%d\n", perf_overcommit);
 }
 
 static ssize_t
-perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
+perf_set_overcommit(struct sysdev_class *class,
+		    struct sysdev_class_attribute *attr,
+		    const char *buf, size_t count)
 {
 	unsigned long val;
 	int err;
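
The new struct sysdev_class_attribute *attr parameter tracks a sysdev core API change; the callbacks here simply ignore it. These show/store pairs are wired up elsewhere in the file with SYSDEV_CLASS_ATTR(), roughly as below; the registration lines are outside this diff, so treat the exact names and mode values as an assumption:

    static SYSDEV_CLASS_ATTR(reserve_percpu, 0644,
                             perf_show_reserve_percpu,
                             perf_set_reserve_percpu);

    static SYSDEV_CLASS_ATTR(overcommit, 0644,
                             perf_show_overcommit,
                             perf_set_overcommit);
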