aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/sched.h
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2014-06-04 13:31:18 -0400
committerIngo Molnar <mingo@kernel.org>2014-06-05 06:09:53 -0400
commite3baac47f0e82c4be632f4f97215bb93bf16b342 (patch)
treecae0a8012654966d9c295f517661c77b2dab2f95 /kernel/sched/sched.h
parent67b9ca70c3030e832999e8d1cdba2984c7bb5bfc (diff)
sched/idle: Optimize try-to-wake-up IPI
[ This series reduces the number of IPIs on Andy's workload by something like 99%. It's down from many hundreds per second to very few. The basic idea behind this series is to make TIF_POLLING_NRFLAG be a reliable indication that the idle task is polling. Once that's done, the rest is reasonably straightforward. ] When enqueueing tasks on remote LLC domains, we send an IPI to do the work 'locally' and avoid bouncing all the cachelines over. However, when the remote CPU is idle (and polling, say x86 mwait), we don't need to send an IPI, we can simply kick the TIF word to wake it up and have the 'idle' loop do the work. So when _TIF_POLLING_NRFLAG is set, but _TIF_NEED_RESCHED is not (yet) set, set _TIF_NEED_RESCHED and avoid sending the IPI. Much-requested-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Peter Zijlstra <peterz@infradead.org> [Edited by Andy Lutomirski, but this is mostly Peter Zijlstra's code.] Signed-off-by: Andy Lutomirski <luto@amacapital.net> Cc: nicolas.pitre@linaro.org Cc: daniel.lezcano@linaro.org Cc: Mike Galbraith <umgwanakikbuti@gmail.com> Cc: umgwanakikbuti@gmail.com Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/ce06f8b02e7e337be63e97597fc4b248d3aa6f9b.1401902905.git.luto@amacapital.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- kernel/sched/sched.h | 6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 956b8ca24893..2f8636199b83 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -670,6 +670,8 @@ extern int migrate_swap(struct task_struct *, struct task_struct *);
 
 #ifdef CONFIG_SMP
 
+extern void sched_ttwu_pending(void);
+
 #define rcu_dereference_check_sched_domain(p) \
 	rcu_dereference_check((p), \
 			      lockdep_is_held(&sched_domains_mutex))
@@ -787,6 +789,10 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+#else
+
+static inline void sched_ttwu_pending(void) { }
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"