Diffstat (limited to 'kernel/sched')
 kernel/sched/debug.c |  6 +++---
 kernel/sched/fair.c  | 23 ++++++++++++++---------
 kernel/sched/stats.h |  5 +++--
 3 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e076bddd4c66..196559994f7c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	SEQ_printf(m, " ");
 
 	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
-		p->comm, p->pid,
+		p->comm, task_pid_nr(p),
 		SPLIT_NS(p->se.vruntime),
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
@@ -289,7 +289,7 @@ do {									\
 	P(nr_load_updates);
 	P(nr_uninterruptible);
 	PN(next_balance);
-	P(curr->pid);
+	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 	PN(clock);
 	P(cpu_load[0]);
 	P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
 
-	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 		get_nr_threads(p));
 	SEQ_printf(m,
 		"---------------------------------------------------------"
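All three debug.c hunks replace direct p->pid reads with the task_pid_nr() accessor. A pid is namespace-relative on Linux, so going through the pid accessor family (task_pid_nr(), task_pid_nr_ns(), task_pid_vnr()) is the convention when reporting it. As a standalone userspace illustration (not kernel code; a made-up demo that needs root privileges for CLONE_NEWPID), the same process has different pids depending on the namespace it is observed from:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char child_stack[1024 * 1024];

/* Runs as pid 1 inside the freshly created pid namespace. */
static int child(void *arg)
{
	printf("child:  getpid() inside new pid namespace = %d\n", (int)getpid());
	return 0;
}

int main(void)
{
	/* clone() returns the child's pid as seen from the *parent's* namespace. */
	pid_t pid = clone(child, child_stack + sizeof(child_stack),
			  CLONE_NEWPID | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	printf("parent: child pid in parent namespace    = %d\n", (int)pid);
	waitpid(pid, NULL, 0);
	return 0;
}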
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b3fe1cd8f40..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 	}
 
 	if (!se) {
-		cfs_rq->h_load = rq->avg.load_avg_contrib;
+		cfs_rq->h_load = cfs_rq->runnable_load_avg;
 		cfs_rq->last_h_load_update = now;
 	}
 
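This hunk seeds the top-level cfs_rq's h_load from its own runnable_load_avg rather than from rq->avg.load_avg_contrib, which keeps the seed consistent with the runnable_load_avg denominators used when h_load is propagated down the group hierarchy and into task_h_load(). A standalone sketch (not kernel code, made-up numbers) of that top-down weighting, assuming the div-by-(runnable + 1) pattern this kernel version uses:

#include <stdio.h>

int main(void)
{
	/* All numbers are made up for illustration. */
	unsigned long root_runnable = 2048;  /* root cfs_rq->runnable_load_avg */
	unsigned long grp_contrib   = 1024;  /* group se load_avg_contrib      */
	unsigned long grp_runnable  = 1024;  /* group cfs_rq runnable load     */
	unsigned long task_contrib  = 512;   /* task se load_avg_contrib       */

	/* The fixed seed: the root's h_load is its own runnable load. */
	unsigned long h_load = root_runnable;

	/* One level down: scale by the group's share of its parent
	 * (the +1 mirrors the kernel's divide-by-zero guard). */
	h_load = h_load * grp_contrib / (root_runnable + 1);

	/* task_h_load(): scale the task's contribution the same way. */
	unsigned long task_h_load = task_contrib * h_load / (grp_runnable + 1);

	printf("group h_load = %lu, task h_load = %lu\n", h_load, task_h_load);
	return 0;
}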
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 			(busiest->load_per_task * SCHED_POWER_SCALE) /
 			busiest->group_power;
 
-	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-	    (scaled_busy_load_per_task * imbn)) {
+	if (busiest->avg_load + scaled_busy_load_per_task >=
+	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
 		env->imbalance = busiest->load_per_task;
 		return;
 	}
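The fix_small_imbalance() hunk is an unsigned-arithmetic fix: the load fields are unsigned long, so the old left-hand side busiest->avg_load - local->avg_load wraps to a huge value whenever the local group is more loaded than the busiest one, making the test spuriously true. Moving local->avg_load to the right-hand side keeps both sides non-negative. A standalone demo (not kernel code, made-up values):

#include <stdio.h>

int main(void)
{
	/* Made-up values with the local group more loaded than the busiest. */
	unsigned long busiest_avg = 100, local_avg = 300;
	unsigned long scaled_load = 50, imbn = 2;

	/* Old form: 100 - 300 wraps to a huge unsigned value, so the
	 * comparison is spuriously true. */
	if (busiest_avg - local_avg + scaled_load >= scaled_load * imbn)
		printf("old test: taken (unsigned wraparound)\n");

	/* New form: both sides stay non-negative; 150 >= 400 is false. */
	if (busiest_avg + scaled_load >= local_avg + scaled_load * imbn)
		printf("new test: taken\n");
	else
		printf("new test: not taken (correct)\n");

	return 0;
}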
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *
 	 * max load less than avg load(as we skip the groups at or below
 	 * its cpu_power, while calculating max_load..)
 	 */
-	if (busiest->avg_load < sds->avg_load) {
+	if (busiest->avg_load <= sds->avg_load ||
+	    local->avg_load >= sds->avg_load) {
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
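The calculate_imbalance() hunk adds local->avg_load >= sds->avg_load to the bail-out condition (and relaxes < to <=): if the local group is already at or above the domain average there is nothing sensible to pull, and letting execution continue would feed an unsigned difference of the form sds->avg_load - local->avg_load further down, which would wrap just like the case above. A standalone sketch (not kernel code, made-up values) of the guarded flow:

#include <stdio.h>

int main(void)
{
	/* Made-up values: the local group is above the domain average. */
	unsigned long busiest_avg = 500, local_avg = 600, sds_avg = 550;

	/* The patched guard: bail out before any unsigned subtraction. */
	if (busiest_avg <= sds_avg || local_avg >= sds_avg) {
		printf("imbalance = 0, defer to fix_small_imbalance()\n");
		return 0;
	}

	/* Only reached when local_avg < sds_avg, so this cannot wrap. */
	printf("room to pull: %lu\n", sds_avg - local_avg);
	return 0;
}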
@@ -5928,11 +5929,15 @@ static void task_fork_fair(struct task_struct *p)
 	cfs_rq = task_cfs_rq(current);
 	curr = cfs_rq->curr;
 
-	if (unlikely(task_cpu(p) != this_cpu)) {
-		rcu_read_lock();
-		__set_task_cpu(p, this_cpu);
-		rcu_read_unlock();
-	}
+	/*
+	 * Not only the cpu but also the task_group of the parent might have
+	 * been changed after parent->se.parent,cfs_rq were copied to
+	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+	 * of child point to valid ones.
+	 */
+	rcu_read_lock();
+	__set_task_cpu(p, this_cpu);
+	rcu_read_unlock();
 
 	update_curr(cfs_rq);
 
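The task_fork_fair() hunk makes the __set_task_cpu() call unconditional: even when the child is forked on the parent's CPU, the parent may have been moved to another task_group after its se.parent/se.cfs_rq pointers were copied into the child, so those cached pointers must be re-derived in every case, not only on a CPU mismatch. A standalone sketch (not kernel code; names are hypothetical) of that stale-cached-pointer pattern:

#include <stdio.h>

struct group { const char *name; };

/* Stand-in for task_struct fields like se.parent/se.cfs_rq,
 * which cache which group a task currently belongs to. */
struct task { struct group *grp; };

/* Stand-in for __set_task_cpu()/set_task_rq(): re-derive the
 * cached pointer from current state instead of trusting it. */
static void rederive_group(struct task *t, struct group *g)
{
	t->grp = g;
}

int main(void)
{
	struct group oldg = { "old" }, newg = { "new" };
	struct task parent = { &oldg };

	struct task child = parent;	/* fork copies the cached pointer */
	rederive_group(&parent, &newg);	/* parent moved groups afterwards */

	printf("before: child in '%s', parent in '%s'\n",
	       child.grp->name, parent.grp->name);

	rederive_group(&child, parent.grp);	/* the unconditional fix */
	printf("after:  child in '%s'\n", child.grp->name);
	return 0;
}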
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 5aef494fc8b4..c7edee71bce8 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
 }
 
 /*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Called when a process ceases being the active-running process involuntarily
+ * due, typically, to expiring its time slice (this may also be called when
+ * switching to the idle task). Now we can calculate how long we ran.
  * Also, if the process is still in the TASK_RUNNING state, call
  * sched_info_queued() to mark that it has now again started waiting on
  * the runqueue.