author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-09-17 05:28:50 -0400
committer Ingo Molnar <mingo@elte.hu>                2010-09-17 06:48:48 -0400
commit    e9d2b064149ff7ef4acbc65a1b9374ac8b218d3e
tree      6ce885c6052a6e05046b9937376107e4bcf378fe /kernel
parent    917bdd1c9b7b0f4c22f2504c2f0c1074c8ab9df7
perf: Undo the per cpu-context timer stuff
Revert the timer per cpu-context timers because of unfortunate nohz
interaction. Fixing that would have been somewhat ugly, so go back to
driving things from the regular tick. Provide a jiffies interval feature
for people who want slower rotations.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <20100917093009.519845633@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
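The rotation cadence this change introduces can be illustrated with a small
standalone sketch (userspace C, not part of the patch; the cpu_context
struct, rotate_on_tick() helper, and the driver loop are invented for
illustration — only the jiffies-modulo test mirrors perf_event_task_tick()
in the diff below):

#include <stdio.h>

/* Illustrative stand-in for struct perf_cpu_context, not kernel code. */
struct cpu_context {
        const char *name;
        unsigned long jiffies_interval; /* 1 = rotate on every tick */
};

static void rotate_on_tick(struct cpu_context *ctx, unsigned long jiffies)
{
        /* Same cadence test as perf_event_task_tick() in the patch. */
        if (ctx->jiffies_interval == 1 || !(jiffies % ctx->jiffies_interval))
                printf("jiffies=%lu: rotate %s\n", jiffies, ctx->name);
}

int main(void)
{
        struct cpu_context fast = { "fast-pmu", 1 };    /* every tick */
        struct cpu_context slow = { "slow-pmu", 4 };    /* every 4th tick */
        unsigned long jiffies;

        for (jiffies = 1; jiffies <= 8; jiffies++) {
                rotate_on_tick(&fast, jiffies);
                rotate_on_tick(&slow, jiffies);
        }
        return 0;
}

A context with jiffies_interval == 1 rotates on every scheduler tick, much
as the removed hrtimer did at TICK_NSEC, while larger values skip ticks and
so rotate more slowly.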
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  79
-rw-r--r--  kernel/sched.c        2
2 files changed, 51 insertions(+), 30 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27332e5f51a7..baae1367e945 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -77,23 +77,22 @@ void perf_pmu_enable(struct pmu *pmu)
                 pmu->pmu_enable(pmu);
 }
 
+static DEFINE_PER_CPU(struct list_head, rotation_list);
+
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
         struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+        struct list_head *head = &__get_cpu_var(rotation_list);
 
-        if (hrtimer_active(&cpuctx->timer))
-                return;
+        WARN_ON(!irqs_disabled());
 
-        __hrtimer_start_range_ns(&cpuctx->timer,
-                        ns_to_ktime(cpuctx->timer_interval), 0,
-                        HRTIMER_MODE_REL_PINNED, 0);
-}
-
-static void perf_pmu_rotate_stop(struct pmu *pmu)
-{
-        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-        hrtimer_cancel(&cpuctx->timer);
+        if (list_empty(&cpuctx->rotation_list))
+                list_add(&cpuctx->rotation_list, head);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -1607,36 +1606,33 @@ static void rotate_ctx(struct perf_event_context *ctx)
 }
 
 /*
- * Cannot race with ->pmu_rotate_start() because this is ran from hardirq
- * context, and ->pmu_rotate_start() is called with irqs disabled (both are
- * cpu affine, so there are no SMP races).
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
  */
-static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
+static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-        enum hrtimer_restart restart = HRTIMER_NORESTART;
-        struct perf_cpu_context *cpuctx;
+        u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
         struct perf_event_context *ctx = NULL;
-        int rotate = 0;
-
-        cpuctx = container_of(timer, struct perf_cpu_context, timer);
+        int rotate = 0, remove = 1;
 
         if (cpuctx->ctx.nr_events) {
-                restart = HRTIMER_RESTART;
+                remove = 0;
                 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
                         rotate = 1;
         }
 
         ctx = cpuctx->task_ctx;
         if (ctx && ctx->nr_events) {
-                restart = HRTIMER_RESTART;
+                remove = 0;
                 if (ctx->nr_events != ctx->nr_active)
                         rotate = 1;
         }
 
         perf_pmu_disable(cpuctx->ctx.pmu);
-        perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
+        perf_ctx_adjust_freq(&cpuctx->ctx, interval);
         if (ctx)
-                perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
+                perf_ctx_adjust_freq(ctx, interval);
 
         if (!rotate)
                 goto done;
@@ -1654,10 +1650,24 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
         task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
 
 done:
+        if (remove)
+                list_del_init(&cpuctx->rotation_list);
+
         perf_pmu_enable(cpuctx->ctx.pmu);
-        hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
+}
+
+void perf_event_task_tick(void)
+{
+        struct list_head *head = &__get_cpu_var(rotation_list);
+        struct perf_cpu_context *cpuctx, *tmp;
 
-        return restart;
+        WARN_ON(!irqs_disabled());
+
+        list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+                if (cpuctx->jiffies_interval == 1 ||
+                                !(jiffies % cpuctx->jiffies_interval))
+                        perf_rotate_context(cpuctx);
+        }
 }
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -5186,9 +5196,8 @@ int perf_pmu_register(struct pmu *pmu)
                 __perf_event_init_context(&cpuctx->ctx);
                 cpuctx->ctx.type = cpu_context;
                 cpuctx->ctx.pmu = pmu;
-                cpuctx->timer_interval = TICK_NSEC;
-                hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-                cpuctx->timer.function = perf_event_context_tick;
+                cpuctx->jiffies_interval = 1;
+                INIT_LIST_HEAD(&cpuctx->rotation_list);
         }
 
 got_cpu_context:
@@ -6229,6 +6238,7 @@ static void __init perf_event_init_all_cpus(void)
         for_each_possible_cpu(cpu) {
                 swhash = &per_cpu(swevent_htable, cpu);
                 mutex_init(&swhash->hlist_mutex);
+                INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
         }
 }
 
@@ -6248,6 +6258,15 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static void perf_pmu_rotate_stop(struct pmu *pmu)
+{
+        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+        WARN_ON(!irqs_disabled());
+
+        list_del_init(&cpuctx->rotation_list);
+}
+
 static void __perf_event_exit_context(void *__info)
 {
         struct perf_event_context *ctx = __info;
diff --git a/kernel/sched.c b/kernel/sched.c
index 1c3ea7a55b7b..794819eab9ca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3584,6 +3584,8 @@ void scheduler_tick(void)
         curr->sched_class->task_tick(rq, curr, 0);
         raw_spin_unlock(&rq->lock);
 
+        perf_event_task_tick();
+
 #ifdef CONFIG_SMP
         rq->idle_at_tick = idle_cpu(cpu);
         trigger_load_balance(rq, cpu);