author		Ingo Molnar <mingo@elte.hu>	2009-05-04 12:54:32 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-04 13:29:57 -0400
commit		b82914ce33146186d554b0f5c41e4e13693614ce (patch)
tree		dde12a31830dd4216433d9a6b365c90bccaa1792 /kernel
parent		dab6f6a3401f596fe934f41fc5da3f401adfdfb1 (diff)
perf_counter: round-robin per-CPU counters too
This used to be unstable when we had the rq->lock dependencies, but now that they are a thing of the past we can turn on per-CPU counter round-robin too.

[ Impact: handle counter over-commit for per-CPU counters too ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
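For reference, a sketch of how perf_counter_task_tick() reads after this patch, reconstructed from the diff below rather than copied from the kernel tree; the comments are added here for explanation only:

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;

	/* Schedule out both the per-CPU and the per-task counters ... */
	perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	/* ... rotate both contexts so over-committed counters get a turn ... */
	rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	/* ... then schedule them back in, in the rotated order. */
	perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}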
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	10
1 file changed, 3 insertions, 7 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8660ae579530..b9679c36bcc2 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1069,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
-	const int rotate_percpu = 0;
 
-	if (rotate_percpu)
-		perf_counter_cpu_sched_out(cpuctx);
+	perf_counter_cpu_sched_out(cpuctx);
 	perf_counter_task_sched_out(curr, cpu);
 
-	if (rotate_percpu)
-		rotate_ctx(&cpuctx->ctx);
+	rotate_ctx(&cpuctx->ctx);
 	rotate_ctx(ctx);
 
-	if (rotate_percpu)
-		perf_counter_cpu_sched_in(cpuctx, cpu);
+	perf_counter_cpu_sched_in(cpuctx, cpu);
 	perf_counter_task_sched_in(curr, cpu);
 }
 