Diffstat (limited to 'kernel/perf_event.c')
 -rw-r--r--  kernel/perf_event.c | 84
 1 file changed, 41 insertions(+), 43 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 4393b9e73740..574ee58a3046 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void) { barrier(); }
 
-void __weak hw_perf_event_setup(int cpu) { barrier(); }
-void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
-void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
-
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
@@ -97,25 +93,15 @@ void __weak perf_event_print_debug(void) { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
-void __perf_disable(void)
-{
-	__get_cpu_var(perf_disable_count)++;
-}
-
-bool __perf_enable(void)
-{
-	return !--__get_cpu_var(perf_disable_count);
-}
-
 void perf_disable(void)
 {
-	__perf_disable();
-	hw_perf_disable();
+	if (!__get_cpu_var(perf_disable_count)++)
+		hw_perf_disable();
 }
 
 void perf_enable(void)
 {
-	if (__perf_enable())
+	if (!--__get_cpu_var(perf_disable_count))
 		hw_perf_enable();
 }
 
@@ -1538,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
+			perf_disable();
 			event->pmu->unthrottle(event);
+			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		perf_disable();
 		event->pmu->read(event);
 		now = atomic64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1551,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
+		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1560,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	if (!ctx->nr_events)
-		return;
-
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
@@ -1575,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	int rotate = 0;
 
 	if (!atomic_read(&nr_events))
 		return;
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
-	ctx = curr->perf_event_ctxp;
+	if (cpuctx->ctx.nr_events &&
+	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+		rotate = 1;
 
-	perf_disable();
+	ctx = curr->perf_event_ctxp;
+	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+		rotate = 1;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
+	if (!rotate)
+		return;
+
+	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1599,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
 	perf_enable();
 }
 
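
The tick now computes rotate up front and returns before the sched-out/rotate/sched-in sequence when neither context needs it. A context only benefits from rotation when it is over-committed, i.e. it has events but not all of them fit on the PMU at once. A stand-alone sketch of that gate (struct and helper names hypothetical):

    /* Hypothetical miniature of the two context counters involved. */
    struct ctx {
            int nr_events;  /* events attached to the context */
            int nr_active;  /* events currently scheduled on the PMU */
    };

    /* Rotation only helps when some attached events are not active. */
    static int needs_rotation(const struct ctx *c)
    {
            return c && c->nr_events && c->nr_events != c->nr_active;
    }

    int main(void)
    {
            struct ctx cpu  = { .nr_events = 4, .nr_active = 2 }; /* over-committed */
            struct ctx task = { .nr_events = 3, .nr_active = 3 }; /* fits entirely */

            /* only the cpu context would set rotate = 1 */
            return !(needs_rotation(&cpu) && !needs_rotation(&task));
    }

This is also why rotate_ctx() loses its own !ctx->nr_events check above: the caller now filters empty contexts before taking the lock.
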
@@ -2791,6 +2786,13 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
+#ifdef CONFIG_EVENT_TRACING
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+#endif
+
 /*
  * Output
  */
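
The empty perf_arch_fetch_caller_regs() is a weak default: an architecture that can cheaply snapshot caller registers supplies a strong definition, which the linker prefers. The mechanism in miniature (hypothetical names; GCC/Clang attribute syntax, user-space):

    #include <stdio.h>

    /* Weak fallback, analogous to the empty stub added in the patch. */
    __attribute__((weak)) void fetch_caller_regs(unsigned long *ip)
    {
            *ip = 0;        /* no arch support: empty snapshot */
    }

    /*
     * An architecture would provide a strong definition elsewhere, e.g.
     *     void fetch_caller_regs(unsigned long *ip) { *ip = current_ip(); }
     * and the linker would pick it over the weak stub above.
     */

    int main(void)
    {
            unsigned long ip;

            fetch_caller_regs(&ip);
            printf("ip = %#lx\n", ip);
            return 0;
    }
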
@@ -4318,9 +4320,8 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size)
+		   int entry_size, struct pt_regs *regs)
 {
-	struct pt_regs *regs = get_irq_regs();
 	struct perf_sample_data data;
 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -4330,12 +4331,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	perf_sample_data_init(&data, addr);
 	data.raw = &raw;
 
-	if (!regs)
-		regs = task_pt_regs(current);
-
 	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			 &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
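
perf_tp_event() no longer reconstructs its register state from get_irq_regs() with a task_pt_regs() fallback; the tracepoint glue captures a pt_regs snapshot at the call site (via the weak hook above) and passes it in explicitly. The shape of that caller-owns-the-context refactor, with hypothetical names:

    struct regs { unsigned long ip; };

    /*
     * Before: the callee guessed its execution context:
     *     void tp_event(int id) { struct regs *r = guess_regs(); ... }
     * After: the call site captures the snapshot and hands it down.
     */
    static void tp_event(int id, struct regs *regs)
    {
            (void)id;
            (void)regs;     /* sample using the caller-provided snapshot */
    }

    static void tracepoint_handler(void)
    {
            struct regs r = { .ip = 0 };    /* captured where the event fired */

            tp_event(1, &r);
    }

    int main(void)
    {
            tracepoint_handler();
            return 0;
    }
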
@@ -4351,7 +4349,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4365,7 +4363,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
@@ -5372,18 +5370,26 @@ int perf_event_init_task(struct task_struct *child)
 	return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+	int cpu;
+	struct perf_cpu_context *cpuctx;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = &per_cpu(perf_cpu_context, cpu);
+		__perf_event_init_context(&cpuctx->ctx, NULL);
+	}
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
-	__perf_event_init_context(&cpuctx->ctx, NULL);
 
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
 	spin_unlock(&perf_resource_lock);
-
-	hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
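
perf_event_init_all_cpus() moves per-cpu context initialization out of the CPU_UP_PREPARE notifier into a single boot-time walk over every possible CPU, so the contexts exist before any CPU comes online. The pattern in miniature (hypothetical names; a fixed array stands in for per-cpu data):

    #define NR_POSSIBLE_CPUS 8      /* hypothetical compile-time bound */

    struct cpu_ctx { int ready; };
    static struct cpu_ctx cpu_ctx[NR_POSSIBLE_CPUS];

    /* One pass at boot: every possible slot is valid before hotplug runs. */
    static void init_all_cpus(void)
    {
            int cpu;

            for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
                    cpu_ctx[cpu].ready = 1;
    }

    int main(void)
    {
            init_all_cpus();
            return 0;
    }
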
@@ -5423,20 +5429,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_event_init_cpu(cpu);
 		break;
 
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		hw_perf_event_setup_online(cpu);
-		break;
-
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
 
-	case CPU_DEAD:
-		hw_perf_event_setup_offline(cpu);
-		break;
-
 	default:
 		break;
 	}
@@ -5454,6 +5451,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+	perf_event_init_all_cpus();
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,