author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-18 12:23:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-18 12:23:32 -0400
commit    7e28b2712e5ebd8d73d25561585bc2ae77da5c30
tree      34e54c0c411f5fa8dae537a0f9773d19f5c57351
parent    186844b292140d1e33225ec95039b6adb03d3fec
parent    13b62e46d5407c7d619aea1dc9c3e0991b631b57
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix comment for sched_info_depart
  sched/Documentation: Update sched-design-CFS.txt documentation
  sched/debug: Take PID namespace into account
  sched/fair: Fix small race where child->se.parent,cfs_rq might point to invalid ones
-rw-r--r-- Documentation/scheduler/sched-design-CFS.txt |  4 +---
-rw-r--r-- kernel/sched/debug.c                         |  6 +++---
-rw-r--r-- kernel/sched/fair.c                          | 14 +++++++++-----
-rw-r--r-- kernel/sched/stats.h                         |  5 +++--
4 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index d529e02d928d..f14f49304222 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -66,9 +66,7 @@ rq->cfs.load value, which is the sum of the weights of the tasks queued on the
 runqueue.
 
 CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
-p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to
-account for possible wraparounds). CFS picks the "leftmost" task from this
-tree and sticks to it.
+p->se.vruntime key. CFS picks the "leftmost" task from this tree and sticks to it.
 As the system progresses forwards, the executed tasks are put into the tree
 more and more to the right --- slowly but surely giving a chance for every task
 to become the "leftmost task" and thus get on the CPU within a deterministic
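
The documentation hunk above describes CFS's core selection rule: runnable tasks are kept ordered by p->se.vruntime, and the scheduler always runs the "leftmost" (smallest-vruntime) task. A rough userspace sketch of that rule follows; this is illustrative only, not kernel code, and all names (sched_entity_demo, pick_next_demo) are made up. A linear scan stands in for the rbtree's cached leftmost node:

#include <stdio.h>
#include <stddef.h>

struct sched_entity_demo {
	const char *comm;            /* task name */
	unsigned long long vruntime; /* virtual runtime, ns */
};

/* Pick the entity with the smallest vruntime -- the "leftmost" task. */
static struct sched_entity_demo *
pick_next_demo(struct sched_entity_demo *tasks, size_t n)
{
	struct sched_entity_demo *leftmost = NULL;

	for (size_t i = 0; i < n; i++)
		if (!leftmost || tasks[i].vruntime < leftmost->vruntime)
			leftmost = &tasks[i];
	return leftmost;
}

int main(void)
{
	struct sched_entity_demo rq[] = {
		{ "kworker", 3000000ULL },
		{ "bash",    1000000ULL },
		{ "cc1",     2000000ULL },
	};
	struct sched_entity_demo *next = pick_next_demo(rq, 3);

	printf("next: %s (vruntime=%llu)\n", next->comm, next->vruntime);
	return 0;
}

As a task runs, its vruntime grows and it migrates rightward in the tree, so every waiting task eventually becomes leftmost within bounded time.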
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e076bddd4c66..196559994f7c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		SEQ_printf(m, " ");
 
 	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
-		p->comm, p->pid,
+		p->comm, task_pid_nr(p),
 		SPLIT_NS(p->se.vruntime),
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
@@ -289,7 +289,7 @@ do { \
 	P(nr_load_updates);
 	P(nr_uninterruptible);
 	PN(next_balance);
-	P(curr->pid);
+	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 	PN(clock);
 	P(cpu_load[0]);
 	P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
 
-	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 		get_nr_threads(p));
 	SEQ_printf(m,
 	"---------------------------------------------------------"
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b3fe1cd8f40..11cd13667359 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5928,11 +5928,15 @@ static void task_fork_fair(struct task_struct *p)
 	cfs_rq = task_cfs_rq(current);
 	curr = cfs_rq->curr;
 
-	if (unlikely(task_cpu(p) != this_cpu)) {
-		rcu_read_lock();
-		__set_task_cpu(p, this_cpu);
-		rcu_read_unlock();
-	}
+	/*
+	 * Not only the cpu but also the task_group of the parent might have
+	 * been changed after parent->se.parent,cfs_rq were copied to
+	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+	 * of child point to valid ones.
+	 */
+	rcu_read_lock();
+	__set_task_cpu(p, this_cpu);
+	rcu_read_unlock();
 
 	update_curr(cfs_rq);
 
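
The hunk above closes a small race: the child's se.parent and cfs_rq links are copied from the parent at fork, but the parent's task_group can change even when its CPU does not, so fixing the links up only on a CPU mismatch can leave stale copies. The fix is to re-derive them unconditionally via __set_task_cpu(). A toy userspace model of that pattern (made-up types, not kernel code):

#include <stdio.h>

struct group { const char *name; };

struct task {
	struct group *grp;       /* current group */
	struct group *se_parent; /* link derived from grp; can go stale */
};

/* Stand-in for __set_task_cpu(): refresh every derived link. */
static void refresh_links(struct task *t)
{
	t->se_parent = t->grp;
}

int main(void)
{
	struct group old_grp = { "old" }, new_grp = { "new" };
	struct task parent = { &old_grp, &old_grp };
	struct task child = parent;   /* fork copies links verbatim */

	parent.grp = &new_grp;        /* parent's group changes... */
	child.grp = parent.grp;       /* ...and the child follows it */

	/* child.se_parent still points at "old" here; refreshing
	 * unconditionally (rather than only on a CPU change) fixes it. */
	refresh_links(&child);
	printf("child se_parent -> %s\n", child.se_parent->name);
	return 0;
}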
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 5aef494fc8b4..c7edee71bce8 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
 }
 
 /*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ * Called when a process ceases being the active-running process involuntarily
+ * due, typically, to expiring its time slice (this may also be called when
+ * switching to the idle task).  Now we can calculate how long we ran.
  * Also, if the process is still in the TASK_RUNNING state, call
  * sched_info_queued() to mark that it has now again started waiting on
  * the runqueue.
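
The accounting that sched_info_depart() feeds is visible from userspace through /proc/<pid>/schedstat on kernels built with schedstats support: cumulative on-CPU time, cumulative runqueue wait time, and the number of timeslices run. A small reader, assuming that three-field format:

#include <stdio.h>

int main(void)
{
	/* Fields: time spent on cpu (ns), time spent waiting on a
	 * runqueue (ns), number of timeslices run. */
	unsigned long long on_cpu, wait, slices;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f) {
		perror("fopen /proc/self/schedstat");
		return 1;
	}
	if (fscanf(f, "%llu %llu %llu", &on_cpu, &wait, &slices) != 3) {
		fprintf(stderr, "unexpected format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("on_cpu=%lluns wait=%lluns timeslices=%llu\n",
	       on_cpu, wait, slices);
	return 0;
}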