Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f13d379bea5..ce9bb7aa7c12 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -608,7 +608,7 @@ repeat_lock_task:
 	return rq;
 }
 
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	spin_unlock(&rq->lock);
@@ -623,7 +623,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
@@ -986,20 +986,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
-/*
  * deactivate_task - remove a task from the runqueue.
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
@@ -1206,7 +1192,7 @@ void kick_process(struct task_struct *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1221,7 +1207,7 @@ static inline unsigned long source_load(int cpu, int type)
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the scheduling class and "nice" value.
  */
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1813,7 +1799,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -3020,7 +3006,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -4140,7 +4126,7 @@ struct task_struct *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -5157,6 +5143,20 @@ static void migrate_live_tasks(int src_cpu)
 }
 
 /*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	update_rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0);
+	inc_nr_running(p, rq);
+}
+
+/*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
  * the _front_ of the runqueue. Used by CPU offline code.
@@ -6494,7 +6494,7 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 #ifdef CONFIG_FAIR_GROUP_SCHED
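
Note: every function uninlined above is static, so the compiler is still free to inline it; dropping the keyword only stops forcing that decision. The one visible side effect is warning behaviour: with -Wall, gcc's -Wunused-function fires for an unused plain static function but not for an unused static inline one, which is consistent with activate_idle_task() being relocated next to its only user in the CPU-offline path rather than left behind. A minimal standalone sketch of that difference (hypothetical helper names, not code from this patch):

#include <stdio.h>

/* Unused "static inline": gcc stays quiet under -Wall. */
static inline int helper_inline(int x)
{
	return x + 1;
}

/* Plain static: -Wall warns (-Wunused-function) if nothing calls it. */
static int helper_plain(int x)
{
	return x * 2;
}

int main(void)
{
	/* Called here, so no warning; gcc -O2 may still inline it. */
	printf("%d\n", helper_plain(21));
	return 0;
}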