-rw-r--r--  include/linux/perf_counter.h |  4
-rw-r--r--  kernel/perf_counter.c        | 74
-rw-r--r--  kernel/sched.c               |  1
3 files changed, 26 insertions, 53 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 0e57d8cc5a3d..deb9acf9ad2a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -615,6 +615,8 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len,
 
 extern void perf_counter_comm(struct task_struct *tsk);
 
+extern void perf_counter_task_migration(struct task_struct *task, int cpu);
+
 #define MAX_STACK_DEPTH 255
 
 struct perf_callchain_entry {
@@ -668,6 +670,8 @@ perf_counter_munmap(unsigned long addr, unsigned long len,
 
 static inline void perf_counter_comm(struct task_struct *tsk) { }
 static inline void perf_counter_init(void) { }
+static inline void perf_counter_task_migration(struct task_struct *task,
+					       int cpu) { }
 #endif
 
 #endif /* __KERNEL__ */
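
Note on the two header hunks: they follow the usual pattern for optional kernel subsystems, pairing a real declaration with an empty static-inline stub so that call sites such as kernel/sched.c need no #ifdef of their own. A minimal sketch of the pattern, condensed from the hunks above (the surrounding CONFIG_PERF_COUNTERS guard is the header's existing structure, not shown in the diff context):

#ifdef CONFIG_PERF_COUNTERS
extern void perf_counter_task_migration(struct task_struct *task, int cpu);
#else
static inline void perf_counter_task_migration(struct task_struct *task,
					       int cpu) { }
#endif

With the stub in place, the scheduler hook added at the bottom of this patch compiles away entirely when perf counters are configured out.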
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8d2653f137e9..cd94cf3bf9e2 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2921,11 +2921,13 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	if (counter->hw_event.config != event_config)
 		return 0;
 
-	if (counter->hw_event.exclude_user && user_mode(regs))
-		return 0;
+	if (regs) {
+		if (counter->hw_event.exclude_user && user_mode(regs))
+			return 0;
 
-	if (counter->hw_event.exclude_kernel && !user_mode(regs))
-		return 0;
+		if (counter->hw_event.exclude_kernel && !user_mode(regs))
+			return 0;
+	}
 
 	return 1;
 }
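
This hunk exists because scheduler-driven events (such as the migration events added below) are delivered with regs == NULL: there is no interrupted register state from which to judge user versus kernel mode, so the exclusion filters must be skipped rather than dereferencing a NULL pointer. A condensed sketch of the resulting filter, using a hypothetical helper name for illustration:

/* Hypothetical helper condensing the privilege filter above. */
static int swcounter_privilege_match(struct perf_counter *counter,
				     struct pt_regs *regs)
{
	if (!regs)
		return 1;	/* no register state: cannot classify, let it count */

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;	/* event occurred in excluded user mode */

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;	/* event occurred in excluded kernel mode */

	return 1;
}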
@@ -2935,7 +2937,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.irq_period && !neg)
+	if (counter->hw.irq_period && !neg && regs)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
 
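The added && regs is the matching guard on the overflow path. A commented reading of the changed line (comments are mine, not in the patch):

	/* regs == NULL means a pure count: the atomic64_add_negative()
	 * above has already accumulated the event into hw.count, and
	 * there is no register state for a sampled overflow to record. */
	if (counter->hw.irq_period && !neg && regs)
		perf_swcounter_overflow(counter, nmi, regs, addr);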
@@ -3151,55 +3153,24 @@ static const struct pmu perf_ops_task_clock = {
 /*
  * Software counter: cpu migrations
  */
-
-static inline u64 get_cpu_migrations(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->se.nr_migrations;
-	return cpu_nr_migrations(smp_processor_id());
-}
-
-static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_cpu_migrations(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+void perf_counter_task_migration(struct task_struct *task, int cpu)
 {
-	cpu_migrations_perf_counter_update(counter);
-}
+	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_counter_context *ctx;
 
-static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count,
-			     get_cpu_migrations(counter));
-	return 0;
-}
+	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
+				 PERF_COUNT_CPU_MIGRATIONS,
+				 1, 1, NULL, 0);
 
-static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
-{
-	cpu_migrations_perf_counter_update(counter);
+	ctx = perf_pin_task_context(task);
+	if (ctx) {
+		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
+					 PERF_COUNT_CPU_MIGRATIONS,
+					 1, 1, NULL, 0);
+		perf_unpin_context(ctx);
+	}
 }
 
-static const struct pmu perf_ops_cpu_migrations = {
-	.enable		= cpu_migrations_perf_counter_enable,
-	.disable	= cpu_migrations_perf_counter_disable,
-	.read		= cpu_migrations_perf_counter_read,
-};
-
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
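
This hunk is the heart of the patch: migration counting moves from a pull model, where a dedicated pmu differenced se.nr_migrations on enable/read/disable, to a push model, where the scheduler reports each migration as an ordinary software event into every context that might hold a matching counter. A commented reading of the new hook (comments are mine; code as in the hunk above):

void perf_counter_task_migration(struct task_struct *task, int cpu)
{
	/* Software-counter context of the destination CPU. */
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx;

	/* Deliver one migration event to CPU-wide counters; regs is
	 * NULL because no interrupted register state exists here,
	 * which is what the regs checks earlier in this patch handle. */
	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
				 PERF_COUNT_CPU_MIGRATIONS,
				 1, 1, NULL, 0);

	/* Pin the task's counter context against concurrent teardown,
	 * deliver the same event to any per-task counters, then unpin. */
	ctx = perf_pin_task_context(task);
	if (ctx) {
		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
					 PERF_COUNT_CPU_MIGRATIONS,
					 1, 1, NULL, 0);
		perf_unpin_context(ctx);
	}
}

One consequence is that get_cpu_migrations(), cpu_migrations_perf_counter_update() and the perf_ops_cpu_migrations pmu have no remaining callers, which is why the hunk deletes them wholesale.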
@@ -3272,11 +3243,8 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		pmu = &perf_ops_generic;
-		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
-		if (!counter->hw_event.exclude_kernel)
-			pmu = &perf_ops_cpu_migrations;
+		pmu = &perf_ops_generic;
 		break;
 	}
 
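
With migrations delivered through the generic software-counter path, PERF_COUNT_CPU_MIGRATIONS no longer needs its own pmu or its exclude_kernel special case; it simply joins the fall-through group. After this hunk the switch cases read:

	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
	case PERF_COUNT_CPU_MIGRATIONS:
		pmu = &perf_ops_generic;
		break;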
diff --git a/kernel/sched.c b/kernel/sched.c
index 3226cc132e9f..8d43347a0c0d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1977,6 +1977,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
+		perf_counter_task_migration(p, new_cpu);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
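
Finally, the producer side: set_task_cpu() calls the new hook. A simplified sketch of where the call lands (assuming, as in this kernel version, that the enclosing brace closes the branch taken only when the task actually changes CPUs):

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
	/* ... */
	if (old_cpu != new_cpu) {
		/* migration/schedstats bookkeeping elided */
		perf_counter_task_migration(p, new_cpu);
	}
	/* ... vruntime adjustment as above ... */
}

Placing the hook inside that branch means a counter only ticks for genuine cross-CPU moves, matching what the old se.nr_migrations-based pull model measured.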