Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index bf21adb6c9fc..5cb7d637e33a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1996,6 +1996,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @p to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
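
For context, a minimal caller sketch (not part of this commit) of the pattern kthread_bind() is written for: the thread must still be stopped, i.e. freshly returned from kthread_create() and not yet woken, so binding happens before the thread's first schedule onto a CPU. The names my_worker_fn and start_pinned_worker are hypothetical.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical thread function: sleep until asked to stop. */
static int my_worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

/*
 * Create a kthread and pin it to @cpu before it ever runs. The
 * thread is still stopped when kthread_create() returns, which is
 * exactly the window kthread_bind() requires.
 */
static struct task_struct *start_pinned_worker(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create(my_worker_fn, NULL, "my_worker/%u", cpu);
	if (IS_ERR(p))
		return p;

	kthread_bind(p, cpu);	/* safe: p has not been woken yet */
	wake_up_process(p);
	return p;
}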