author		Frederic Weisbecker <fweisbec@gmail.com>	2013-07-22 20:31:05 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-07-30 16:29:15 -0400
commit		d84153d6c96f61aa06429586284639f32debf03e (patch)
tree		3aab6dd1374fa5f9beb6ea0ad65bcbcf25798faa /kernel
parent		ba8a75c16e292c0a3a87406a77508cbbc6cf4ee2 (diff)
perf: Implement finer grained full dynticks kick
Currently the full dynticks subsystem keeps the tick alive as long as there are perf events running. This prevents the tick from being stopped as long as features such as the lockup detectors are running. As a temporary fix, the lockup detector is disabled by default when full dynticks is built, but this is not a long-term viable solution.

To fix this, only keep the tick alive when an event configured with a frequency rather than a period is running on the CPU, or when an event throttles on the CPU. These are the only purposes of the perf tick, especially now that the rotation of flexible events is handled from a separate hrtimer. The tick can be shut down the rest of the time.

Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
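For illustration only (not part of the patch), a minimal userspace sketch of the distinction the kick logic relies on: an event opened with attr.freq = 1 and a sample_freq needs the tick so the kernel can keep re-adjusting its period, while an event opened with a fixed sample_period does not. The chosen event type and values are arbitrary, and error handling is omitted.

/*
 * Illustrative userspace sketch (not from the patch): the same event
 * opened in frequency mode vs. fixed-period mode.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* No glibc wrapper exists for perf_event_open(), so call the syscall directly. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int freq_fd, period_fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/*
	 * Frequency mode (what "perf record -F 4000" uses): the kernel
	 * recomputes the sampling period from the tick, so this is the
	 * kind of event that keeps the tick alive under full dynticks.
	 */
	attr.freq = 1;
	attr.sample_freq = 4000;
	freq_fd = perf_event_open(&attr, 0, -1, -1, 0);

	/*
	 * Fixed-period mode (what "perf record -c 100000" uses): no tick
	 * is needed to adjust the period, so the tick may be stopped.
	 */
	attr.freq = 0;
	attr.sample_period = 100000;
	period_fd = perf_event_open(&attr, 0, -1, -1, 0);

	close(freq_fd);
	close(period_fd);
	return 0;
}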
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3fe385aa93e6..916cf1f593b4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -870,12 +870,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu)
 
 	WARN_ON(!irqs_disabled());
 
-	if (list_empty(&cpuctx->rotation_list)) {
-		int was_empty = list_empty(head);
+	if (list_empty(&cpuctx->rotation_list))
 		list_add(&cpuctx->rotation_list, head);
-		if (was_empty)
-			tick_nohz_full_kick();
-	}
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -1875,6 +1871,9 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
+	if (atomic_read(&__get_cpu_var(perf_freq_events)))
+		tick_nohz_full_kick();
+
 	return 0;
 }
 
@@ -2812,10 +2811,11 @@ done:
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (list_empty(&__get_cpu_var(rotation_list)))
-		return true;
-	else
+	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	    __this_cpu_read(perf_throttled_count))
 		return false;
+	else
+		return true;
 }
 #endif
 
@@ -5202,6 +5202,7 @@ static int __perf_event_overflow(struct perf_event *event,
 			__this_cpu_inc(perf_throttled_count);
 			hwc->interrupts = MAX_INTERRUPTS;
 			perf_log_throttle(event, 0);
+			tick_nohz_full_kick();
 			ret = 1;
 		}
 	}
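For context, a condensed paraphrase of the consumer side of this hook, modeled on can_stop_full_tick() in kernel/time/tick-sched.c of roughly this kernel version (not verbatim; checks unrelated to perf are elided): the tick on a full dynticks CPU is stopped only when every subsystem that might still need it, perf included, agrees.

/*
 * Condensed paraphrase of the consumer side (kernel/time/tick-sched.c,
 * roughly this kernel version; other subsystem checks elided).
 */
static bool can_stop_full_tick(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (!sched_can_stop_tick())
		return false;

	/*
	 * After this patch, perf only vetoes stopping the tick when a
	 * frequency-based event is running on this CPU or an event has
	 * throttled here; a plain fixed-period event no longer does.
	 */
	if (!perf_event_can_stop_tick())
		return false;

	/* ... further subsystem checks elided ... */
	return true;
}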