author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-05-08 12:56:04 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-05-09 09:23:17 -0400
commit	cb04ff9ac424d0e689d9b612e9f73cb443ab4b7e (patch)
tree	7335f4ca82dc0366ffbf9dc8de5396259213435a
parent	8b1e13638d465863572c8207a5cfceeef0cf0441 (diff)
sched, perf: Use a single callback into the scheduler
We can easily use a single callback for both sched-in and sched-out. This
reduces the code footprint in the scheduler path as well as removes the
PMU black spot otherwise present between the out and in callback.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-o56ajxp1edwqg6x9d31wb805@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
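To make the intent concrete, here is a minimal standalone sketch of the pattern (simplified types and names, not the kernel code): one combined callback performs the sched-out and sched-in halves back to back, so there is no window between them where the PMU state belongs to neither task.

/*
 * Minimal standalone sketch (simplified types and names; NOT the
 * kernel code).  Before the patch the scheduler called two hooks,
 * leaving a PMU "black spot" between them; after it, one combined
 * callback does both halves back to back.
 */
#include <stdio.h>

struct task { const char *name; };

static void pmu_sched_out(struct task *prev)
{
	printf("save PMU state of %s\n", prev->name);
}

static void pmu_sched_in(struct task *next)
{
	printf("restore PMU state of %s\n", next->name);
}

/* the single combined callback, mirroring __perf_event_task_sched() */
static void pmu_task_sched(struct task *prev, struct task *next)
{
	pmu_sched_out(prev);
	pmu_sched_in(next);
}

int main(void)
{
	struct task a = { "prev" }, b = { "next" };
	pmu_task_sched(&a, &b);	/* one call site in the switch path */
	return 0;
}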
-rw-r--r--include/linux/perf_event.h24
-rw-r--r--kernel/events/core.c14
-rw-r--r--kernel/sched/core.c9
3 files changed, 17 insertions, 30 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f32578634d9d..8adf70e9e3cc 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1084,10 +1084,8 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *prev,
-				       struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *prev,
-					struct task_struct *next);
+extern void __perf_event_task_sched(struct task_struct *prev,
+				    struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1207,20 +1205,13 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *prev,
-					    struct task_struct *task)
+static inline void perf_event_task_sched(struct task_struct *prev,
+					 struct task_struct *task)
 {
-	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched_in(prev, task);
-}
-
-static inline void perf_event_task_sched_out(struct task_struct *prev,
-					     struct task_struct *next)
-{
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
 	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched_out(prev, next);
+		__perf_event_task_sched(prev, task);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1295,11 +1286,8 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *prev,
-			 struct task_struct *task)		{ }
-static inline void
-perf_event_task_sched_out(struct task_struct *prev,
-			  struct task_struct *next)		{ }
+perf_event_task_sched(struct task_struct *prev,
+		      struct task_struct *task)			{ }
 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task) { }
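A note on the inline wrapper above: static_key_false() keeps the call to __perf_event_task_sched() essentially free when no events are active. The following is an illustrative sketch of that guard pattern only, with a plain flag standing in for the kernel's runtime-patched static key; the names merely echo the patch and are not the real API.

/*
 * Illustration of the guard in the inline wrapper (assumption-laden
 * sketch: a plain flag stands in for the kernel's static key, which
 * static_key_false() turns into a patched no-op/jump at runtime).
 */
#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };

static bool perf_sched_events_on;	/* stand-in for perf_sched_events.key */

static void do_perf_task_sched(struct task *prev, struct task *next)
{
	printf("perf: switch %s -> %s\n", prev->name, next->name);
}

static inline void perf_task_sched(struct task *prev, struct task *next)
{
	/* the software context-switch count is taken unconditionally,
	 * like perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, ...) */
	if (perf_sched_events_on)	/* static_key_false() in the kernel */
		do_perf_task_sched(prev, next);
}

int main(void)
{
	struct task a = { "prev" }, b = { "next" };
	perf_task_sched(&a, &b);	/* guard false: near-zero cost */
	perf_sched_events_on = true;
	perf_task_sched(&a, &b);	/* now takes the slow path */
	return 0;
}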
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 00c58df9f4e2..e82c7a1face9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void __perf_event_task_sched_out(struct task_struct *task,
-				 struct task_struct *next)
+static void __perf_event_task_sched_out(struct task_struct *task,
+					struct task_struct *next)
 {
 	int ctxn;
 
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *prev,
-				struct task_struct *task)
+static void __perf_event_task_sched_in(struct task_struct *prev,
+				       struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2305,6 +2305,12 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	perf_branch_stack_sched_in(prev, task);
 }
 
+void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
+{
+	__perf_event_task_sched_out(prev, next);
+	__perf_event_task_sched_in(prev, next);
+}
+
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30a..5c692a0a555d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	sched_info_switch(prev, next);
-	perf_event_task_sched_out(prev, next);
+	perf_event_task_sched(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1956,13 +1956,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_disable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(prev, current);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_enable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 
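The net effect on the switch path: prepare_task_switch() now makes the single perf call while interrupts are already disabled, so finish_task_switch() no longer needs the __ARCH_WANT_INTERRUPTS_ON_CTXSW irq toggling around a sched-in hook. A heavily simplified sketch of the resulting shape (rq locking, preempt notifiers and arch hooks elided; these are not the real scheduler signatures):

/*
 * Heavily simplified sketch of the switch path after this patch
 * (illustration only, not the actual scheduler code).
 */
#include <stdio.h>

struct task_struct { const char *comm; };

static void perf_event_task_sched(struct task_struct *prev,
				  struct task_struct *next)
{
	printf("perf: %s -> %s\n", prev->comm, next->comm);
}

static void prepare_task_switch(struct task_struct *prev,
				struct task_struct *next)
{
	/* single perf callback, run with interrupts already off */
	perf_event_task_sched(prev, next);
}

static void finish_task_switch(struct task_struct *prev)
{
	/* no perf hook and no __ARCH_WANT_INTERRUPTS_ON_CTXSW
	 * irq-disable/enable dance needed here any more */
	(void)prev;
}

int main(void)
{
	struct task_struct a = { "prev" }, b = { "next" };
	prepare_task_switch(&a, &b);
	finish_task_switch(&a);
	return 0;
}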