Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--   kernel/perf_event.c   80
1 file changed, 63 insertions, 17 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2c47ed6c4f26..d75e4c8727f9 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -78,6 +78,25 @@ void perf_pmu_enable(struct pmu *pmu)
         pmu->pmu_enable(pmu);
 }
 
+static void perf_pmu_rotate_start(void)
+{
+        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+        if (hrtimer_active(&cpuctx->timer))
+                return;
+
+        __hrtimer_start_range_ns(&cpuctx->timer,
+                        ns_to_ktime(cpuctx->timer_interval), 0,
+                        HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void perf_pmu_rotate_stop(void)
+{
+        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+        hrtimer_cancel(&cpuctx->timer);
+}
+
 static void get_ctx(struct perf_event_context *ctx)
 {
         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
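
The two helpers above arm and cancel a per-cpu hrtimer embedded in the cpu context; the timer and timer_interval fields they dereference are presumably added to struct perf_cpu_context by the header half of this patch and are not visible in this diffstat-limited view. Below is a minimal userspace sketch of the same idempotent start / unconditional stop pattern, using a POSIX timer as a stand-in for the hrtimer API (hrtimer_active(), __hrtimer_start_range_ns() and hrtimer_cancel() in the kernel); all demo_* names are illustrative only.

/* Userspace analogy, not the kernel API: arm the timer only if it is idle. */
#include <signal.h>
#include <time.h>

struct demo_cpu_context {
        timer_t timer;          /* stands in for struct hrtimer         */
        long    interval_ns;    /* stands in for cpuctx->timer_interval */
        int     armed;          /* stands in for hrtimer_active()       */
};

static void demo_rotate_start(struct demo_cpu_context *ctx)
{
        struct itimerspec its = {
                .it_value    = { .tv_nsec = ctx->interval_ns },
                .it_interval = { .tv_nsec = ctx->interval_ns },
        };

        if (ctx->armed)         /* already rotating: nothing to do */
                return;

        timer_settime(ctx->timer, 0, &its, NULL);
        ctx->armed = 1;
}

static void demo_rotate_stop(struct demo_cpu_context *ctx)
{
        struct itimerspec off = { 0 };

        timer_settime(ctx->timer, 0, &off, NULL);       /* disarm */
        ctx->armed = 0;
}

int main(void)
{
        struct demo_cpu_context ctx = { .interval_ns = 1000000 };      /* 1 ms */
        struct sigevent sev = { .sigev_notify = SIGEV_NONE };

        timer_create(CLOCK_MONOTONIC, &sev, &ctx.timer);
        demo_rotate_start(&ctx);
        demo_rotate_start(&ctx);        /* second call is a no-op */
        demo_rotate_stop(&ctx);
        timer_delete(&ctx.timer);
        return 0;
}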
@@ -281,6 +300,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
         }
 
         list_add_rcu(&event->event_entry, &ctx->event_list);
+        if (!ctx->nr_events)
+                perf_pmu_rotate_start();
         ctx->nr_events++;
         if (event->attr.inherit_stat)
                 ctx->nr_stat++;
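
Note that nr_events is tested before the increment, so perf_pmu_rotate_start() fires exactly on the 0 -> 1 transition of the context's event count: CPUs that never see an event never arm their rotation timer. A trivial sketch of that trigger pattern (hypothetical names):

#include <stdio.h>

static int nr_events;

static void rotate_start(void)
{
        printf("first event: arm the rotation timer\n");
}

static void add_event(void)
{
        if (!nr_events)         /* true only for the very first event */
                rotate_start();
        nr_events++;
}

int main(void)
{
        add_event();    /* prints once */
        add_event();    /* silent      */
        add_event();    /* silent      */
        return 0;
}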
@@ -1383,6 +1404,12 @@ void perf_event_task_sched_in(struct task_struct *task)
         ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
         cpuctx->task_ctx = ctx;
+
+        /*
+         * Since these rotations are per-cpu, we need to ensure the
+         * cpu-context we got scheduled on is actually rotating.
+         */
+        perf_pmu_rotate_start();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1487,7 +1514,7 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
         }
 }
 
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 {
         struct perf_event *event;
         struct hw_perf_event *hwc;
@@ -1524,7 +1551,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 hwc->freq_count_stamp = now;
 
                 if (delta > 0)
-                        perf_adjust_period(event, TICK_NSEC, delta);
+                        perf_adjust_period(event, period, delta);
         }
         raw_spin_unlock(&ctx->lock);
 }
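
With rotation driven by a per-cpu hrtimer, the frequency code can no longer assume its adjustment window is one scheduler tick, so perf_ctx_adjust_freq() is handed the real interval (cpuctx->timer_interval below) and forwards it to perf_adjust_period(). The underlying arithmetic: estimate the event rate observed over the last interval, then steer the hardware sample period toward rate / sample_freq. A worked userspace sketch of that calculation follows; estimate_sample_period() is a hypothetical helper, not the kernel's perf_calculate_period(), which adds fixed-point scaling and a low-pass filter on the adjustment.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/*
 * count:       events counted during the last interval
 * interval_ns: length of that interval in nanoseconds
 * sample_freq: desired samples per second
 *
 * events/sec = count * NSEC_PER_SEC / interval_ns
 * new period = events/sec / sample_freq   (events between samples)
 */
static uint64_t estimate_sample_period(uint64_t count, uint64_t interval_ns,
                                       uint64_t sample_freq)
{
        uint64_t rate = count * NSEC_PER_SEC / interval_ns;

        return rate / sample_freq;
}

int main(void)
{
        /* 2,600,000 cycles seen in a 1 ms interval, want 1000 samples/sec */
        uint64_t period = estimate_sample_period(2600000, 1000000, 1000);

        /* ~2.6 GHz counter at 1000 Hz sampling -> period of ~2,600,000 cycles */
        printf("new sample period: %" PRIu64 " events\n", period);
        return 0;
}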
@@ -1542,30 +1569,39 @@ static void rotate_ctx(struct perf_event_context *ctx)
         raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr)
+/*
+ * Cannot race with ->pmu_rotate_start() because this is ran from hardirq
+ * context, and ->pmu_rotate_start() is called with irqs disabled (both are
+ * cpu affine, so there are no SMP races).
+ */
+static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
 {
+        enum hrtimer_restart restart = HRTIMER_NORESTART;
         struct perf_cpu_context *cpuctx;
         struct perf_event_context *ctx;
         int rotate = 0;
 
-        if (!atomic_read(&nr_events))
-                return;
+        cpuctx = container_of(timer, struct perf_cpu_context, timer);
 
-        cpuctx = &__get_cpu_var(perf_cpu_context);
-        if (cpuctx->ctx.nr_events &&
-            cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-                rotate = 1;
+        if (cpuctx->ctx.nr_events) {
+                restart = HRTIMER_RESTART;
+                if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+                        rotate = 1;
+        }
 
-        ctx = curr->perf_event_ctxp;
-        if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
-                rotate = 1;
+        ctx = current->perf_event_ctxp;
+        if (ctx && ctx->nr_events) {
+                restart = HRTIMER_RESTART;
+                if (ctx->nr_events != ctx->nr_active)
+                        rotate = 1;
+        }
 
-        perf_ctx_adjust_freq(&cpuctx->ctx);
+        perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
         if (ctx)
-                perf_ctx_adjust_freq(ctx);
+                perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
 
         if (!rotate)
-                return;
+                goto done;
 
         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
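
The timer callback above receives only the struct hrtimer pointer, so container_of() is used to get back to the perf_cpu_context that embeds it. A self-contained userspace illustration of that idiom (demo names only; the real macro lives in the kernel headers):

#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_timer { long expires; };

struct demo_cpu_context {
        int               cpu;
        struct demo_timer timer;        /* embedded, like cpuctx->timer */
};

/* The "callback" only sees the embedded timer, as hrtimer callbacks do. */
static void demo_callback(struct demo_timer *t)
{
        struct demo_cpu_context *ctx =
                demo_container_of(t, struct demo_cpu_context, timer);

        printf("callback ran for cpu %d\n", ctx->cpu);
}

int main(void)
{
        struct demo_cpu_context ctx = { .cpu = 3 };

        demo_callback(&ctx.timer);      /* prints "callback ran for cpu 3" */
        return 0;
}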
@@ -1577,7 +1613,12 @@ void perf_event_task_tick(struct task_struct *curr)
 
         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
-                task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+                task_ctx_sched_in(current, EVENT_FLEXIBLE);
+
+done:
+        hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
+
+        return restart;
 }
 
 static int event_enable_on_exec(struct perf_event *event,
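
Rotation is thus no longer called from the scheduler tick: the hrtimer callback re-arms itself by forwarding its expiry by timer_interval and returning HRTIMER_RESTART while either context still has events, and returns HRTIMER_NORESTART (letting the timer die) once both are empty. A userspace analogy of that self-rearming contract, using timerfd instead of the hrtimer API; the names and the 100 ms interval are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

#define INTERVAL_NS 100000000L  /* 100 ms */

/* Re-arm a one-shot deadline from "now", the moral equivalent of hrtimer_forward_now(). */
static void forward_now(int tfd)
{
        struct itimerspec its = {
                .it_value = { .tv_nsec = INTERVAL_NS },
        };

        timerfd_settime(tfd, 0, &its, NULL);
}

int main(void)
{
        int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
        int ticks_left = 3;     /* stands in for "are there still events?" */
        uint64_t expirations;

        forward_now(tfd);
        while (read(tfd, &expirations, sizeof(expirations)) == sizeof(expirations)) {
                printf("tick\n");
                if (--ticks_left == 0)  /* HRTIMER_NORESTART: let the timer die */
                        break;
                forward_now(tfd);       /* HRTIMER_RESTART: push the deadline forward */
        }
        close(tfd);
        return 0;
}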
@@ -4786,7 +4827,7 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
                 }
                 __hrtimer_start_range_ns(&hwc->hrtimer,
                                 ns_to_ktime(period), 0,
-                                HRTIMER_MODE_REL, 0);
+                                HRTIMER_MODE_REL_PINNED, 0);
         }
 }
 
@@ -5904,6 +5945,9 @@ static void __init perf_event_init_all_cpus(void)
 
                 cpuctx = &per_cpu(perf_cpu_context, cpu);
                 __perf_event_init_context(&cpuctx->ctx, NULL);
+                cpuctx->timer_interval = TICK_NSEC;
+                hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+                cpuctx->timer.function = perf_event_context_tick;
         }
 }
 
@@ -5934,6 +5978,8 @@ static void __perf_event_exit_cpu(void *info)
         struct perf_event_context *ctx = &cpuctx->ctx;
         struct perf_event *event, *tmp;
 
+        perf_pmu_rotate_stop();
+
         list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                 __perf_event_remove_from_context(event);
         list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)