-rw-r--r--  include/linux/perf_counter.h |  8
-rw-r--r--  include/linux/sched.h        |  3
-rw-r--r--  kernel/perf_counter.c        | 49
-rw-r--r--  kernel/sched.c               |  7
4 files changed, 61 insertions(+), 6 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index d2a16563415f..f30486fc55d7 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -42,6 +42,8 @@ enum hw_event_types {
 	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
 	PERF_COUNT_BRANCH_MISSES	= 5,
 
+	PERF_HW_EVENTS_MAX		= 6,
+
 	/*
 	 * Special "software" counters provided by the kernel, even if
 	 * the hardware does not support performance counters. These
@@ -50,11 +52,11 @@ enum hw_event_types {
 	 */
 	PERF_COUNT_CPU_CLOCK		= -1,
 	PERF_COUNT_TASK_CLOCK		= -2,
-	/*
-	 * Future software events:
-	 */
 	PERF_COUNT_PAGE_FAULTS		= -3,
 	PERF_COUNT_CONTEXT_SWITCHES	= -4,
+	PERF_COUNT_CPU_MIGRATIONS	= -5,
+
+	PERF_SW_EVENTS_MIN		= -6,
 };
 
 /*
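
The two new sentinels bracket the valid event range: hardware event types run from 0 up to (but not including) PERF_HW_EVENTS_MAX, software event types from -1 down to (but not including) PERF_SW_EVENTS_MIN. A minimal sketch of a range check built on them, assuming hardware types start at 0 as the enum values shown suggest; this helper is hypothetical and not part of the patch:

static inline int perf_event_type_valid(s64 type)
{
	/* software events are negative, hardware events non-negative */
	return type > PERF_SW_EVENTS_MIN && type < PERF_HW_EVENTS_MAX;
}
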
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4c530278391b..2e15be8fc792 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1014,6 +1014,8 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1029,7 +1031,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;
 
-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
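
The net effect of these two hunks is to move nr_migrations out of the CONFIG_SCHEDSTATS block, so the field exists in every configuration; the set_task_cpu() change below relies on that. Roughly, the resulting layout (elided members unchanged):

struct sched_entity {
	/* ... */
	u64	last_wakeup;
	u64	avg_overlap;

	u64	nr_migrations;		/* now present unconditionally */

#ifdef CONFIG_SCHEDSTATS
	/* ... */
	u64	nr_migrations_cold;	/* stats-only counters stay here */
	/* ... */
#endif
	/* ... */
};
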
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 09287091c526..fb11e351e44e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -936,6 +936,52 @@ static const struct hw_perf_counter_ops perf_ops_context_switches = {
 	.hw_perf_counter_read	= context_switches_perf_counter_read,
 };
 
+static inline u64 get_cpu_migrations(void)
+{
+	return current->se.nr_migrations;
+}
+
+static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
+{
+	u64 prev, now;
+	s64 delta;
+
+	prev = atomic64_read(&counter->hw.prev_count);
+	now = get_cpu_migrations();
+
+	atomic64_set(&counter->hw.prev_count, now);
+
+	delta = now - prev;
+	if (WARN_ON_ONCE(delta < 0))
+		delta = 0;
+
+	atomic64_add(delta, &counter->count);
+}
+
+static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+{
+	cpu_migrations_perf_counter_update(counter);
+}
+
+static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+{
+	/*
+	 * se.nr_migrations is a per-task value already,
+	 * so we don't have to clear it on switch-in.
+	 */
+}
+
+static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
+{
+	cpu_migrations_perf_counter_update(counter);
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
+	.hw_perf_counter_enable		= cpu_migrations_perf_counter_enable,
+	.hw_perf_counter_disable	= cpu_migrations_perf_counter_disable,
+	.hw_perf_counter_read		= cpu_migrations_perf_counter_read,
+};
+
 static const struct hw_perf_counter_ops *
 sw_perf_counter_init(struct perf_counter *counter)
 {
@@ -951,6 +997,9 @@ sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_CONTEXT_SWITCHES:
 		hw_ops = &perf_ops_context_switches;
 		break;
+	case PERF_COUNT_CPU_MIGRATIONS:
+		hw_ops = &perf_ops_cpu_migrations;
+		break;
 	default:
 		break;
 	}
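
cpu_migrations_perf_counter_update() uses the same pull model as the other software counters above: snapshot a monotonically increasing source (se.nr_migrations), then accumulate the delta since the previous snapshot into counter->count. A self-contained userspace model of that pattern; all names here are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

struct pull_counter {
	uint64_t prev;	/* last snapshot of the raw source */
	uint64_t count;	/* accumulated deltas, what readers see */
};

static void pull_counter_update(struct pull_counter *c, uint64_t now)
{
	int64_t delta = (int64_t)(now - c->prev);

	c->prev = now;
	if (delta < 0)	/* mirrors the WARN_ON_ONCE() guard above */
		delta = 0;
	c->count += (uint64_t)delta;
}

int main(void)
{
	struct pull_counter c = { 0, 0 };

	pull_counter_update(&c, 3);	/* three migrations so far */
	pull_counter_update(&c, 5);	/* two more since last read */
	printf("%llu\n", (unsigned long long)c.count);	/* prints 5 */
	return 0;
}
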
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c3f4106314e..382cfdb5e38d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1852,12 +1852,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
 
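
For readability, this is the shape of the migration accounting in set_task_cpu() once the hunk applies (reconstructed from the diff; elided lines unchanged). The increment is now unconditional, while the task_hot() bookkeeping stays under CONFIG_SCHEDSTATS:

#ifdef CONFIG_SCHEDSTATS
	/* ... clock_offset adjustments ... */
#endif
	if (old_cpu != new_cpu) {
		p->se.nr_migrations++;	/* always counted */
#ifdef CONFIG_SCHEDSTATS
		if (task_hot(p, old_rq->clock, NULL))
			schedstat_inc(p, se.nr_forced2_migrations);
#endif
	}
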
@@ -2375,6 +2377,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 