path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	40
1 file changed, 36 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a455dca884a6..28dd4f490bfc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1992,6 +1992,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @k: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2004,7 +2036,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -9532,13 +9564,13 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
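
For context, a minimal sketch (not part of the patch) of how the newly exported kthread_bind() is typically used by a caller: create the thread, bind it while it is still stopped, then wake it. The worker function and helper names below (my_worker_fn, start_worker_on) are illustrative only, not taken from the kernel tree.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical worker loop: sleeps until woken or asked to stop. */
static int my_worker_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		/* real per-CPU work would go here */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *start_worker_on(unsigned int cpu, void *data)
{
	struct task_struct *tsk;

	/*
	 * kthread_create() returns the thread stopped, which is exactly
	 * the state the wait_task_inactive() check in kthread_bind()
	 * above requires.
	 */
	tsk = kthread_create(my_worker_fn, data, "my_worker/%u", cpu);
	if (IS_ERR(tsk))
		return tsk;

	/* Bind before the first wakeup; @cpu must be possible but need
	 * not be online yet. */
	kthread_bind(tsk, cpu);
	wake_up_process(tsk);
	return tsk;
}

The thread would later be terminated with kthread_stop(tsk), which makes kthread_should_stop() return true, wakes the worker, and waits for it to exit.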