-rw-r--r--  fs/proc/array.c            19
-rw-r--r--  include/linux/rcutiny.h     5
-rw-r--r--  include/linux/rcutree.h    11
-rw-r--r--  include/linux/sched.h      13
-rw-r--r--  init/main.c                 7
-rw-r--r--  kernel/cpu.c               24
-rw-r--r--  kernel/kthread.c           23
-rw-r--r--  kernel/sched.c            401
-rw-r--r--  kernel/sched_clock.c       23
-rw-r--r--  kernel/sched_fair.c        53
-rw-r--r--  kernel/sched_idletask.c     2
-rw-r--r--  kernel/sched_rt.c           4
12 files changed, 336 insertions, 249 deletions
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 4badde179b18..f560325c444f 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -134,13 +134,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
134 * simple bit tests. 134 * simple bit tests.
135 */ 135 */
136static const char *task_state_array[] = { 136static const char *task_state_array[] = {
137 "R (running)", /* 0 */ 137 "R (running)", /* 0 */
138 "S (sleeping)", /* 1 */ 138 "S (sleeping)", /* 1 */
139 "D (disk sleep)", /* 2 */ 139 "D (disk sleep)", /* 2 */
140 "T (stopped)", /* 4 */ 140 "T (stopped)", /* 4 */
141 "T (tracing stop)", /* 8 */ 141 "t (tracing stop)", /* 8 */
142 "Z (zombie)", /* 16 */ 142 "Z (zombie)", /* 16 */
143 "X (dead)" /* 32 */ 143 "X (dead)", /* 32 */
144 "x (dead)", /* 64 */
145 "K (wakekill)", /* 128 */
146 "W (waking)", /* 256 */
144}; 147};
145 148
146static inline const char *get_task_state(struct task_struct *tsk) 149static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
148 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; 151 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
149 const char **p = &task_state_array[0]; 152 const char **p = &task_state_array[0];
150 153
154 BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
155
151 while (state) { 156 while (state) {
152 p++; 157 p++;
153 state >>= 1; 158 state >>= 1;
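The array.c hunk grows the state table to cover every TASK_* bit up to TASK_WAKING and ties its size to TASK_STATE_MAX with a BUILD_BUG_ON(), since get_task_state() indexes the table by the position of the most significant state bit. A minimal userspace sketch of that lookup and size check follows; state_name() and the assert_state_array_size typedef are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define TASK_STATE_MAX 512

static const char *task_state_array[] = {
	"R (running)",      /*   0 */
	"S (sleeping)",     /*   1 */
	"D (disk sleep)",   /*   2 */
	"T (stopped)",      /*   4 */
	"t (tracing stop)", /*   8 */
	"Z (zombie)",       /*  16 */
	"X (dead)",         /*  32 */
	"x (dead)",         /*  64 */
	"K (wakekill)",     /* 128 */
	"W (waking)",       /* 256 */
};

/*
 * Stand-in for BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(...)):
 * the array size below goes negative, and compilation fails, if the table
 * and TASK_STATE_MAX drift apart (1 + ilog2(512) == 10 entries expected).
 */
typedef char assert_state_array_size[
	(sizeof(task_state_array) / sizeof(task_state_array[0]) == 10) ? 1 : -1];

static const char *state_name(unsigned int state)
{
	const char **p = &task_state_array[0];

	/* index by the most significant set state bit, as get_task_state() does */
	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}

int main(void)
{
	printf("%s\n", state_name(0));   /* R (running) */
	printf("%s\n", state_name(256)); /* W (waking)  */
	return 0;
}
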
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index c4ba9a78721e..96cc307ed9f4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
101{ 101{
102} 102}
103 103
104static inline int rcu_preempt_depth(void)
105{
106 return 0;
107}
108
104#endif /* __LINUX_RCUTINY_H */ 109#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c93eee5911b0..8044b1b94333 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
45extern void synchronize_rcu(void); 45extern void synchronize_rcu(void);
46extern void exit_rcu(void); 46extern void exit_rcu(void);
47 47
48/*
49 * Defined as macro as it is a very low level header
50 * included from areas that don't even know about current
51 */
52#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
53
48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 54#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
49 55
50static inline void __rcu_read_lock(void) 56static inline void __rcu_read_lock(void)
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
63{ 69{
64} 70}
65 71
72static inline int rcu_preempt_depth(void)
73{
74 return 0;
75}
76
66#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 77#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
67 78
68static inline void __rcu_read_lock_bh(void) 79static inline void __rcu_read_lock_bh(void)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e89857812be6..f2f842db03ce 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
192#define TASK_DEAD 64 192#define TASK_DEAD 64
193#define TASK_WAKEKILL 128 193#define TASK_WAKEKILL 128
194#define TASK_WAKING 256 194#define TASK_WAKING 256
195#define TASK_STATE_MAX 512
196
197#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
198
199extern char ___assert_task_state[1 - 2*!!(
200 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
195 201
196/* Convenience macros for the sake of set_task_state */ 202/* Convenience macros for the sake of set_task_state */
197#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 203#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -1091,7 +1097,8 @@ struct sched_class {
1091 enum cpu_idle_type idle); 1097 enum cpu_idle_type idle);
1092 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1098 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1093 void (*post_schedule) (struct rq *this_rq); 1099 void (*post_schedule) (struct rq *this_rq);
1094 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 1100 void (*task_waking) (struct rq *this_rq, struct task_struct *task);
1101 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1095 1102
1096 void (*set_cpus_allowed)(struct task_struct *p, 1103 void (*set_cpus_allowed)(struct task_struct *p,
1097 const struct cpumask *newmask); 1104 const struct cpumask *newmask);
@@ -1115,7 +1122,7 @@ struct sched_class {
1115 struct task_struct *task); 1122 struct task_struct *task);
1116 1123
1117#ifdef CONFIG_FAIR_GROUP_SCHED 1124#ifdef CONFIG_FAIR_GROUP_SCHED
1118 void (*moved_group) (struct task_struct *p); 1125 void (*moved_group) (struct task_struct *p, int on_rq);
1119#endif 1126#endif
1120}; 1127};
1121 1128
@@ -2594,8 +2601,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2594} 2601}
2595#endif /* CONFIG_MM_OWNER */ 2602#endif /* CONFIG_MM_OWNER */
2596 2603
2597#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
2598
2599#endif /* __KERNEL__ */ 2604#endif /* __KERNEL__ */
2600 2605
2601#endif 2606#endif
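The sched.h hunk replaces the old 7-character TASK_STATE_TO_CHAR_STR with a 10-character one and guards it with ___assert_task_state, an extern array whose size collapses to -1 (a compile error) whenever the string length stops matching ilog2(TASK_STATE_MAX)+1. A self-contained sketch of that idiom, assuming ilog2(512) is spelled out as the constant 9 because the kernel helper is not available in userspace:

#include <stdio.h>
#include <string.h>

#define TASK_STATE_MAX		512
#define TASK_STATE_TO_CHAR_STR	"RSDTtZXxKW"

/* ilog2(512) == 9, written as a plain constant for this sketch */
#define ILOG2_TASK_STATE_MAX	9

/*
 * 1 - 2*!!(cond) is 1 when cond is false and -1 when it is true, so the
 * declaration below only compiles while the string stays in sync with
 * the state bits.  Being extern and never referenced, it costs nothing.
 */
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ILOG2_TASK_STATE_MAX+1)];

int main(void)
{
	/* sizeof counts the terminating NUL, hence the -1: ten characters */
	printf("%zu state characters, TASK_STATE_MAX = %d\n",
	       strlen(TASK_STATE_TO_CHAR_STR), TASK_STATE_MAX);
	return 0;
}
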
diff --git a/init/main.c b/init/main.c
index c3db4a98b369..dac44a9356a5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -369,12 +369,6 @@ static void __init smp_init(void)
369{ 369{
370 unsigned int cpu; 370 unsigned int cpu;
371 371
372 /*
373 * Set up the current CPU as possible to migrate to.
374 * The other ones will be done by cpu_up/cpu_down()
375 */
376 set_cpu_active(smp_processor_id(), true);
377
378 /* FIXME: This should be done in userspace --RR */ 372 /* FIXME: This should be done in userspace --RR */
379 for_each_present_cpu(cpu) { 373 for_each_present_cpu(cpu) {
380 if (num_online_cpus() >= setup_max_cpus) 374 if (num_online_cpus() >= setup_max_cpus)
@@ -486,6 +480,7 @@ static void __init boot_cpu_init(void)
486 int cpu = smp_processor_id(); 480 int cpu = smp_processor_id();
487 /* Mark the boot cpu "present", "online" etc for SMP and UP case */ 481 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
488 set_cpu_online(cpu, true); 482 set_cpu_online(cpu, true);
483 set_cpu_active(cpu, true);
489 set_cpu_present(cpu, true); 484 set_cpu_present(cpu, true);
490 set_cpu_possible(cpu, true); 485 set_cpu_possible(cpu, true);
491} 486}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 291ac586f37f..1c8ddd6ee940 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -209,6 +209,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
209 return -ENOMEM; 209 return -ENOMEM;
210 210
211 cpu_hotplug_begin(); 211 cpu_hotplug_begin();
212 set_cpu_active(cpu, false);
212 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 213 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
213 hcpu, -1, &nr_calls); 214 hcpu, -1, &nr_calls);
214 if (err == NOTIFY_BAD) { 215 if (err == NOTIFY_BAD) {
@@ -280,18 +281,6 @@ int __ref cpu_down(unsigned int cpu)
280 goto out; 281 goto out;
281 } 282 }
282 283
283 set_cpu_active(cpu, false);
284
285 /*
286 * Make sure the all cpus did the reschedule and are not
287 * using stale version of the cpu_active_mask.
288 * This is not strictly necessary becuase stop_machine()
289 * that we run down the line already provides the required
290 * synchronization. But it's really a side effect and we do not
291 * want to depend on the innards of the stop_machine here.
292 */
293 synchronize_sched();
294
295 err = _cpu_down(cpu, 0); 284 err = _cpu_down(cpu, 0);
296 285
297out: 286out:
@@ -382,19 +371,12 @@ int disable_nonboot_cpus(void)
382 return error; 371 return error;
383 cpu_maps_update_begin(); 372 cpu_maps_update_begin();
384 first_cpu = cpumask_first(cpu_online_mask); 373 first_cpu = cpumask_first(cpu_online_mask);
385 /* We take down all of the non-boot CPUs in one shot to avoid races 374 /*
375 * We take down all of the non-boot CPUs in one shot to avoid races
386 * with the userspace trying to use the CPU hotplug at the same time 376 * with the userspace trying to use the CPU hotplug at the same time
387 */ 377 */
388 cpumask_clear(frozen_cpus); 378 cpumask_clear(frozen_cpus);
389 379
390 for_each_online_cpu(cpu) {
391 if (cpu == first_cpu)
392 continue;
393 set_cpu_active(cpu, false);
394 }
395
396 synchronize_sched();
397
398 printk("Disabling non-boot CPUs ...\n"); 380 printk("Disabling non-boot CPUs ...\n");
399 for_each_online_cpu(cpu) { 381 for_each_online_cpu(cpu) {
400 if (cpu == first_cpu) 382 if (cpu == first_cpu)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ab7ae57773e1..fbb6222fe7e0 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,6 +150,29 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
150EXPORT_SYMBOL(kthread_create); 150EXPORT_SYMBOL(kthread_create);
151 151
152/** 152/**
153 * kthread_bind - bind a just-created kthread to a cpu.
154 * @p: thread created by kthread_create().
155 * @cpu: cpu (might not be online, must be possible) for @k to run on.
156 *
157 * Description: This function is equivalent to set_cpus_allowed(),
158 * except that @cpu doesn't need to be online, and the thread must be
159 * stopped (i.e., just returned from kthread_create()).
160 */
161void kthread_bind(struct task_struct *p, unsigned int cpu)
162{
163 /* Must have done schedule() in kthread() before we set_task_cpu */
164 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
165 WARN_ON(1);
166 return;
167 }
168
169 p->cpus_allowed = cpumask_of_cpu(cpu);
170 p->rt.nr_cpus_allowed = 1;
171 p->flags |= PF_THREAD_BOUND;
172}
173EXPORT_SYMBOL(kthread_bind);
174
175/**
153 * kthread_stop - stop a thread created by kthread_create(). 176 * kthread_stop - stop a thread created by kthread_create().
154 * @k: thread created by kthread_create(). 177 * @k: thread created by kthread_create().
155 * 178 *
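With the runqueue manipulation dropped, kthread_bind() only needs the thread to be stopped, so it can live in kthread.c. A minimal sketch of the usual calling pattern, create stopped, bind, then wake; worker_fn and start_bound_worker are illustrative names and error handling is trimmed:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* placeholder work loop */
	return 0;
}

static struct task_struct *start_bound_worker(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create(worker_fn, NULL, "bound_worker/%u", cpu);
	if (IS_ERR(p))
		return p;

	/*
	 * Bind before the first wake-up: the thread is still parked in
	 * TASK_UNINTERRUPTIBLE inside kthread(), which is exactly the
	 * state wait_task_inactive() in kthread_bind() expects.
	 */
	kthread_bind(p, cpu);
	wake_up_process(p);

	return p;
}
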
diff --git a/kernel/sched.c b/kernel/sched.c
index 18cceeecce35..720df108a2d6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,6 +26,8 @@
26 * Thomas Gleixner, Mike Kravetz 26 * Thomas Gleixner, Mike Kravetz
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/mm.h> 31#include <linux/mm.h>
30#include <linux/module.h> 32#include <linux/module.h>
31#include <linux/nmi.h> 33#include <linux/nmi.h>
@@ -2002,39 +2004,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2002 p->sched_class->prio_changed(rq, p, oldprio, running); 2004 p->sched_class->prio_changed(rq, p, oldprio, running);
2003} 2005}
2004 2006
2005/**
2006 * kthread_bind - bind a just-created kthread to a cpu.
2007 * @p: thread created by kthread_create().
2008 * @cpu: cpu (might not be online, must be possible) for @k to run on.
2009 *
2010 * Description: This function is equivalent to set_cpus_allowed(),
2011 * except that @cpu doesn't need to be online, and the thread must be
2012 * stopped (i.e., just returned from kthread_create()).
2013 *
2014 * Function lives here instead of kthread.c because it messes with
2015 * scheduler internals which require locking.
2016 */
2017void kthread_bind(struct task_struct *p, unsigned int cpu)
2018{
2019 struct rq *rq = cpu_rq(cpu);
2020 unsigned long flags;
2021
2022 /* Must have done schedule() in kthread() before we set_task_cpu */
2023 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
2024 WARN_ON(1);
2025 return;
2026 }
2027
2028 raw_spin_lock_irqsave(&rq->lock, flags);
2029 update_rq_clock(rq);
2030 set_task_cpu(p, cpu);
2031 p->cpus_allowed = cpumask_of_cpu(cpu);
2032 p->rt.nr_cpus_allowed = 1;
2033 p->flags |= PF_THREAD_BOUND;
2034 raw_spin_unlock_irqrestore(&rq->lock, flags);
2035}
2036EXPORT_SYMBOL(kthread_bind);
2037
2038#ifdef CONFIG_SMP 2007#ifdef CONFIG_SMP
2039/* 2008/*
2040 * Is this task likely cache-hot: 2009 * Is this task likely cache-hot:
@@ -2044,6 +2013,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2044{ 2013{
2045 s64 delta; 2014 s64 delta;
2046 2015
2016 if (p->sched_class != &fair_sched_class)
2017 return 0;
2018
2047 /* 2019 /*
2048 * Buddy candidates are cache hot: 2020 * Buddy candidates are cache hot:
2049 */ 2021 */
@@ -2052,9 +2024,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2052 &p->se == cfs_rq_of(&p->se)->last)) 2024 &p->se == cfs_rq_of(&p->se)->last))
2053 return 1; 2025 return 1;
2054 2026
2055 if (p->sched_class != &fair_sched_class)
2056 return 0;
2057
2058 if (sysctl_sched_migration_cost == -1) 2027 if (sysctl_sched_migration_cost == -1)
2059 return 1; 2028 return 1;
2060 if (sysctl_sched_migration_cost == 0) 2029 if (sysctl_sched_migration_cost == 0)
@@ -2065,22 +2034,24 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2065 return delta < (s64)sysctl_sched_migration_cost; 2034 return delta < (s64)sysctl_sched_migration_cost;
2066} 2035}
2067 2036
2068
2069void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2037void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2070{ 2038{
2071 int old_cpu = task_cpu(p); 2039#ifdef CONFIG_SCHED_DEBUG
2072 struct cfs_rq *old_cfsrq = task_cfs_rq(p), 2040 /*
2073 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); 2041 * We should never call set_task_cpu() on a blocked task,
2042 * ttwu() will sort out the placement.
2043 */
2044 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2045 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2046#endif
2074 2047
2075 trace_sched_migrate_task(p, new_cpu); 2048 trace_sched_migrate_task(p, new_cpu);
2076 2049
2077 if (old_cpu != new_cpu) { 2050 if (task_cpu(p) == new_cpu)
2078 p->se.nr_migrations++; 2051 return;
2079 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2052
2080 1, 1, NULL, 0); 2053 p->se.nr_migrations++;
2081 } 2054 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2082 p->se.vruntime -= old_cfsrq->min_vruntime -
2083 new_cfsrq->min_vruntime;
2084 2055
2085 __set_task_cpu(p, new_cpu); 2056 __set_task_cpu(p, new_cpu);
2086} 2057}
@@ -2105,13 +2076,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2105 2076
2106 /* 2077 /*
2107 * If the task is not on a runqueue (and not running), then 2078 * If the task is not on a runqueue (and not running), then
2108 * it is sufficient to simply update the task's cpu field. 2079 * the next wake-up will properly place the task.
2109 */ 2080 */
2110 if (!p->se.on_rq && !task_running(rq, p)) { 2081 if (!p->se.on_rq && !task_running(rq, p))
2111 update_rq_clock(rq);
2112 set_task_cpu(p, dest_cpu);
2113 return 0; 2082 return 0;
2114 }
2115 2083
2116 init_completion(&req->done); 2084 init_completion(&req->done);
2117 req->task = p; 2085 req->task = p;
@@ -2317,10 +2285,73 @@ void task_oncpu_function_call(struct task_struct *p,
2317} 2285}
2318 2286
2319#ifdef CONFIG_SMP 2287#ifdef CONFIG_SMP
2288static int select_fallback_rq(int cpu, struct task_struct *p)
2289{
2290 int dest_cpu;
2291 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2292
2293 /* Look for allowed, online CPU in same node. */
2294 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2295 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2296 return dest_cpu;
2297
2298 /* Any allowed, online CPU? */
2299 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2300 if (dest_cpu < nr_cpu_ids)
2301 return dest_cpu;
2302
2303 /* No more Mr. Nice Guy. */
2304 if (dest_cpu >= nr_cpu_ids) {
2305 rcu_read_lock();
2306 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2307 rcu_read_unlock();
2308 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2309
2310 /*
2311 * Don't tell them about moving exiting tasks or
2312 * kernel threads (both mm NULL), since they never
2313 * leave kernel.
2314 */
2315 if (p->mm && printk_ratelimit()) {
2316 printk(KERN_INFO "process %d (%s) no "
2317 "longer affine to cpu%d\n",
2318 task_pid_nr(p), p->comm, cpu);
2319 }
2320 }
2321
2322 return dest_cpu;
2323}
2324
2325/*
2326 * Called from:
2327 *
2328 * - fork, @p is stable because it isn't on the tasklist yet
2329 *
2330 * - exec, @p is unstable, retry loop
2331 *
2332 * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
2333 * we should be good.
2334 */
2320static inline 2335static inline
2321int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 2336int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2322{ 2337{
2323 return p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2338 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2339
2340 /*
2341 * In order not to call set_task_cpu() on a blocking task we need
2342 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2343 * cpu.
2344 *
2345 * Since this is common to all placement strategies, this lives here.
2346 *
2347 * [ this allows ->select_task() to simply return task_cpu(p) and
2348 * not worry about this generic constraint ]
2349 */
2350 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2351 !cpu_active(cpu)))
2352 cpu = select_fallback_rq(task_cpu(p), p);
2353
2354 return cpu;
2324} 2355}
2325#endif 2356#endif
2326 2357
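select_fallback_rq() above encodes a three-step policy: prefer an allowed, active CPU on the same node, then any allowed, active CPU, and only then drop the affinity mask entirely (after asking the cpuset for a wider mask). A userspace sketch of the same cascade, with plain bitmasks standing in for cpumasks and node_mask()/first_cpu() as illustrative helpers:

#include <stdio.h>

static unsigned long node_mask(int cpu)
{
	/* pretend each node holds four consecutive CPUs */
	return 0xfUL << (cpu & ~3);
}

static int first_cpu(unsigned long mask)
{
	return mask ? __builtin_ctzl(mask) : -1;
}

static int select_fallback(int cpu, unsigned long allowed, unsigned long active)
{
	int dest;

	/* 1) allowed, active CPU in the same node */
	dest = first_cpu(node_mask(cpu) & allowed & active);
	if (dest >= 0)
		return dest;

	/* 2) any allowed, active CPU */
	dest = first_cpu(allowed & active);
	if (dest >= 0)
		return dest;

	/* 3) no more Mr. Nice Guy: ignore the affinity mask */
	return first_cpu(active);
}

int main(void)
{
	/* task allowed only on CPU 5, CPUs 0-3 active: falls through to step 3 */
	printf("fallback -> cpu%d\n", select_fallback(5, 1UL << 5, 0xfUL));
	return 0;
}
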
@@ -2375,6 +2406,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2375 if (task_contributes_to_load(p)) 2406 if (task_contributes_to_load(p))
2376 rq->nr_uninterruptible--; 2407 rq->nr_uninterruptible--;
2377 p->state = TASK_WAKING; 2408 p->state = TASK_WAKING;
2409
2410 if (p->sched_class->task_waking)
2411 p->sched_class->task_waking(rq, p);
2412
2378 __task_rq_unlock(rq); 2413 __task_rq_unlock(rq);
2379 2414
2380 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2415 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
@@ -2438,8 +2473,8 @@ out_running:
2438 2473
2439 p->state = TASK_RUNNING; 2474 p->state = TASK_RUNNING;
2440#ifdef CONFIG_SMP 2475#ifdef CONFIG_SMP
2441 if (p->sched_class->task_wake_up) 2476 if (p->sched_class->task_woken)
2442 p->sched_class->task_wake_up(rq, p); 2477 p->sched_class->task_woken(rq, p);
2443 2478
2444 if (unlikely(rq->idle_stamp)) { 2479 if (unlikely(rq->idle_stamp)) {
2445 u64 delta = rq->clock - rq->idle_stamp; 2480 u64 delta = rq->clock - rq->idle_stamp;
@@ -2538,14 +2573,6 @@ static void __sched_fork(struct task_struct *p)
2538#ifdef CONFIG_PREEMPT_NOTIFIERS 2573#ifdef CONFIG_PREEMPT_NOTIFIERS
2539 INIT_HLIST_HEAD(&p->preempt_notifiers); 2574 INIT_HLIST_HEAD(&p->preempt_notifiers);
2540#endif 2575#endif
2541
2542 /*
2543 * We mark the process as running here, but have not actually
2544 * inserted it onto the runqueue yet. This guarantees that
2545 * nobody will actually run it, and a signal or other external
2546 * event cannot wake it up and insert it on the runqueue either.
2547 */
2548 p->state = TASK_RUNNING;
2549} 2576}
2550 2577
2551/* 2578/*
@@ -2556,6 +2583,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
2556 int cpu = get_cpu(); 2583 int cpu = get_cpu();
2557 2584
2558 __sched_fork(p); 2585 __sched_fork(p);
2586 /*
2587 * We mark the process as waking here. This guarantees that
2588 * nobody will actually run it, and a signal or other external
2589 * event cannot wake it up and insert it on the runqueue either.
2590 */
2591 p->state = TASK_WAKING;
2559 2592
2560 /* 2593 /*
2561 * Revert to default priority/policy on fork if requested. 2594 * Revert to default priority/policy on fork if requested.
@@ -2624,14 +2657,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2624 struct rq *rq; 2657 struct rq *rq;
2625 2658
2626 rq = task_rq_lock(p, &flags); 2659 rq = task_rq_lock(p, &flags);
2627 BUG_ON(p->state != TASK_RUNNING); 2660 BUG_ON(p->state != TASK_WAKING);
2661 p->state = TASK_RUNNING;
2628 update_rq_clock(rq); 2662 update_rq_clock(rq);
2629 activate_task(rq, p, 0); 2663 activate_task(rq, p, 0);
2630 trace_sched_wakeup_new(rq, p, 1); 2664 trace_sched_wakeup_new(rq, p, 1);
2631 check_preempt_curr(rq, p, WF_FORK); 2665 check_preempt_curr(rq, p, WF_FORK);
2632#ifdef CONFIG_SMP 2666#ifdef CONFIG_SMP
2633 if (p->sched_class->task_wake_up) 2667 if (p->sched_class->task_woken)
2634 p->sched_class->task_wake_up(rq, p); 2668 p->sched_class->task_woken(rq, p);
2635#endif 2669#endif
2636 task_rq_unlock(rq, &flags); 2670 task_rq_unlock(rq, &flags);
2637} 2671}
@@ -3101,21 +3135,36 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
3101} 3135}
3102 3136
3103/* 3137/*
3104 * If dest_cpu is allowed for this process, migrate the task to it. 3138 * sched_exec - execve() is a valuable balancing opportunity, because at
3105 * This is accomplished by forcing the cpu_allowed mask to only 3139 * this point the task has the smallest effective memory and cache footprint.
3106 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
3107 * the cpu_allowed mask is restored.
3108 */ 3140 */
3109static void sched_migrate_task(struct task_struct *p, int dest_cpu) 3141void sched_exec(void)
3110{ 3142{
3143 struct task_struct *p = current;
3111 struct migration_req req; 3144 struct migration_req req;
3145 int dest_cpu, this_cpu;
3112 unsigned long flags; 3146 unsigned long flags;
3113 struct rq *rq; 3147 struct rq *rq;
3114 3148
3149again:
3150 this_cpu = get_cpu();
3151 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3152 if (dest_cpu == this_cpu) {
3153 put_cpu();
3154 return;
3155 }
3156
3115 rq = task_rq_lock(p, &flags); 3157 rq = task_rq_lock(p, &flags);
3158 put_cpu();
3159
3160 /*
3161 * select_task_rq() can race against ->cpus_allowed
3162 */
3116 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3163 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
3117 || unlikely(!cpu_active(dest_cpu))) 3164 || unlikely(!cpu_active(dest_cpu))) {
3118 goto out; 3165 task_rq_unlock(rq, &flags);
3166 goto again;
3167 }
3119 3168
3120 /* force the process onto the specified CPU */ 3169 /* force the process onto the specified CPU */
3121 if (migrate_task(p, dest_cpu, &req)) { 3170 if (migrate_task(p, dest_cpu, &req)) {
@@ -3130,24 +3179,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
3130 3179
3131 return; 3180 return;
3132 } 3181 }
3133out:
3134 task_rq_unlock(rq, &flags); 3182 task_rq_unlock(rq, &flags);
3135} 3183}
3136 3184
3137/* 3185/*
3138 * sched_exec - execve() is a valuable balancing opportunity, because at
3139 * this point the task has the smallest effective memory and cache footprint.
3140 */
3141void sched_exec(void)
3142{
3143 int new_cpu, this_cpu = get_cpu();
3144 new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
3145 put_cpu();
3146 if (new_cpu != this_cpu)
3147 sched_migrate_task(current, new_cpu);
3148}
3149
3150/*
3151 * pull_task - move a task from a remote runqueue to the local runqueue. 3186 * pull_task - move a task from a remote runqueue to the local runqueue.
3152 * Both runqueues must be locked. 3187 * Both runqueues must be locked.
3153 */ 3188 */
@@ -5340,8 +5375,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
5340{ 5375{
5341 struct pt_regs *regs = get_irq_regs(); 5376 struct pt_regs *regs = get_irq_regs();
5342 5377
5343 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5378 pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
5344 prev->comm, prev->pid, preempt_count()); 5379 prev->comm, prev->pid, preempt_count());
5345 5380
5346 debug_show_held_locks(prev); 5381 debug_show_held_locks(prev);
5347 print_modules(); 5382 print_modules();
@@ -5911,14 +5946,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
5911 */ 5946 */
5912bool try_wait_for_completion(struct completion *x) 5947bool try_wait_for_completion(struct completion *x)
5913{ 5948{
5949 unsigned long flags;
5914 int ret = 1; 5950 int ret = 1;
5915 5951
5916 spin_lock_irq(&x->wait.lock); 5952 spin_lock_irqsave(&x->wait.lock, flags);
5917 if (!x->done) 5953 if (!x->done)
5918 ret = 0; 5954 ret = 0;
5919 else 5955 else
5920 x->done--; 5956 x->done--;
5921 spin_unlock_irq(&x->wait.lock); 5957 spin_unlock_irqrestore(&x->wait.lock, flags);
5922 return ret; 5958 return ret;
5923} 5959}
5924EXPORT_SYMBOL(try_wait_for_completion); 5960EXPORT_SYMBOL(try_wait_for_completion);
@@ -5933,12 +5969,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
5933 */ 5969 */
5934bool completion_done(struct completion *x) 5970bool completion_done(struct completion *x)
5935{ 5971{
5972 unsigned long flags;
5936 int ret = 1; 5973 int ret = 1;
5937 5974
5938 spin_lock_irq(&x->wait.lock); 5975 spin_lock_irqsave(&x->wait.lock, flags);
5939 if (!x->done) 5976 if (!x->done)
5940 ret = 0; 5977 ret = 0;
5941 spin_unlock_irq(&x->wait.lock); 5978 spin_unlock_irqrestore(&x->wait.lock, flags);
5942 return ret; 5979 return ret;
5943} 5980}
5944EXPORT_SYMBOL(completion_done); 5981EXPORT_SYMBOL(completion_done);
@@ -6457,7 +6494,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6457 return -EINVAL; 6494 return -EINVAL;
6458 6495
6459 retval = -ESRCH; 6496 retval = -ESRCH;
6460 read_lock(&tasklist_lock); 6497 rcu_read_lock();
6461 p = find_process_by_pid(pid); 6498 p = find_process_by_pid(pid);
6462 if (p) { 6499 if (p) {
6463 retval = security_task_getscheduler(p); 6500 retval = security_task_getscheduler(p);
@@ -6465,7 +6502,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6465 retval = p->policy 6502 retval = p->policy
6466 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 6503 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
6467 } 6504 }
6468 read_unlock(&tasklist_lock); 6505 rcu_read_unlock();
6469 return retval; 6506 return retval;
6470} 6507}
6471 6508
@@ -6483,7 +6520,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6483 if (!param || pid < 0) 6520 if (!param || pid < 0)
6484 return -EINVAL; 6521 return -EINVAL;
6485 6522
6486 read_lock(&tasklist_lock); 6523 rcu_read_lock();
6487 p = find_process_by_pid(pid); 6524 p = find_process_by_pid(pid);
6488 retval = -ESRCH; 6525 retval = -ESRCH;
6489 if (!p) 6526 if (!p)
@@ -6494,7 +6531,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6494 goto out_unlock; 6531 goto out_unlock;
6495 6532
6496 lp.sched_priority = p->rt_priority; 6533 lp.sched_priority = p->rt_priority;
6497 read_unlock(&tasklist_lock); 6534 rcu_read_unlock();
6498 6535
6499 /* 6536 /*
6500 * This one might sleep, we cannot do it with a spinlock held ... 6537 * This one might sleep, we cannot do it with a spinlock held ...
@@ -6504,7 +6541,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6504 return retval; 6541 return retval;
6505 6542
6506out_unlock: 6543out_unlock:
6507 read_unlock(&tasklist_lock); 6544 rcu_read_unlock();
6508 return retval; 6545 return retval;
6509} 6546}
6510 6547
@@ -6515,22 +6552,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
6515 int retval; 6552 int retval;
6516 6553
6517 get_online_cpus(); 6554 get_online_cpus();
6518 read_lock(&tasklist_lock); 6555 rcu_read_lock();
6519 6556
6520 p = find_process_by_pid(pid); 6557 p = find_process_by_pid(pid);
6521 if (!p) { 6558 if (!p) {
6522 read_unlock(&tasklist_lock); 6559 rcu_read_unlock();
6523 put_online_cpus(); 6560 put_online_cpus();
6524 return -ESRCH; 6561 return -ESRCH;
6525 } 6562 }
6526 6563
6527 /* 6564 /* Prevent p going away */
6528 * It is not safe to call set_cpus_allowed with the
6529 * tasklist_lock held. We will bump the task_struct's
6530 * usage count and then drop tasklist_lock.
6531 */
6532 get_task_struct(p); 6565 get_task_struct(p);
6533 read_unlock(&tasklist_lock); 6566 rcu_read_unlock();
6534 6567
6535 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 6568 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6536 retval = -ENOMEM; 6569 retval = -ENOMEM;
@@ -6616,7 +6649,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
6616 int retval; 6649 int retval;
6617 6650
6618 get_online_cpus(); 6651 get_online_cpus();
6619 read_lock(&tasklist_lock); 6652 rcu_read_lock();
6620 6653
6621 retval = -ESRCH; 6654 retval = -ESRCH;
6622 p = find_process_by_pid(pid); 6655 p = find_process_by_pid(pid);
@@ -6632,7 +6665,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
6632 task_rq_unlock(rq, &flags); 6665 task_rq_unlock(rq, &flags);
6633 6666
6634out_unlock: 6667out_unlock:
6635 read_unlock(&tasklist_lock); 6668 rcu_read_unlock();
6636 put_online_cpus(); 6669 put_online_cpus();
6637 6670
6638 return retval; 6671 return retval;
@@ -6876,7 +6909,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6876 return -EINVAL; 6909 return -EINVAL;
6877 6910
6878 retval = -ESRCH; 6911 retval = -ESRCH;
6879 read_lock(&tasklist_lock); 6912 rcu_read_lock();
6880 p = find_process_by_pid(pid); 6913 p = find_process_by_pid(pid);
6881 if (!p) 6914 if (!p)
6882 goto out_unlock; 6915 goto out_unlock;
@@ -6889,13 +6922,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6889 time_slice = p->sched_class->get_rr_interval(rq, p); 6922 time_slice = p->sched_class->get_rr_interval(rq, p);
6890 task_rq_unlock(rq, &flags); 6923 task_rq_unlock(rq, &flags);
6891 6924
6892 read_unlock(&tasklist_lock); 6925 rcu_read_unlock();
6893 jiffies_to_timespec(time_slice, &t); 6926 jiffies_to_timespec(time_slice, &t);
6894 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 6927 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
6895 return retval; 6928 return retval;
6896 6929
6897out_unlock: 6930out_unlock:
6898 read_unlock(&tasklist_lock); 6931 rcu_read_unlock();
6899 return retval; 6932 return retval;
6900} 6933}
6901 6934
@@ -6907,23 +6940,23 @@ void sched_show_task(struct task_struct *p)
6907 unsigned state; 6940 unsigned state;
6908 6941
6909 state = p->state ? __ffs(p->state) + 1 : 0; 6942 state = p->state ? __ffs(p->state) + 1 : 0;
6910 printk(KERN_INFO "%-13.13s %c", p->comm, 6943 pr_info("%-13.13s %c", p->comm,
6911 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 6944 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
6912#if BITS_PER_LONG == 32 6945#if BITS_PER_LONG == 32
6913 if (state == TASK_RUNNING) 6946 if (state == TASK_RUNNING)
6914 printk(KERN_CONT " running "); 6947 pr_cont(" running ");
6915 else 6948 else
6916 printk(KERN_CONT " %08lx ", thread_saved_pc(p)); 6949 pr_cont(" %08lx ", thread_saved_pc(p));
6917#else 6950#else
6918 if (state == TASK_RUNNING) 6951 if (state == TASK_RUNNING)
6919 printk(KERN_CONT " running task "); 6952 pr_cont(" running task ");
6920 else 6953 else
6921 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 6954 pr_cont(" %016lx ", thread_saved_pc(p));
6922#endif 6955#endif
6923#ifdef CONFIG_DEBUG_STACK_USAGE 6956#ifdef CONFIG_DEBUG_STACK_USAGE
6924 free = stack_not_used(p); 6957 free = stack_not_used(p);
6925#endif 6958#endif
6926 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 6959 pr_cont("%5lu %5d %6d 0x%08lx\n", free,
6927 task_pid_nr(p), task_pid_nr(p->real_parent), 6960 task_pid_nr(p), task_pid_nr(p->real_parent),
6928 (unsigned long)task_thread_info(p)->flags); 6961 (unsigned long)task_thread_info(p)->flags);
6929 6962
@@ -6935,11 +6968,9 @@ void show_state_filter(unsigned long state_filter)
6935 struct task_struct *g, *p; 6968 struct task_struct *g, *p;
6936 6969
6937#if BITS_PER_LONG == 32 6970#if BITS_PER_LONG == 32
6938 printk(KERN_INFO 6971 pr_info(" task PC stack pid father\n");
6939 " task PC stack pid father\n");
6940#else 6972#else
6941 printk(KERN_INFO 6973 pr_info(" task PC stack pid father\n");
6942 " task PC stack pid father\n");
6943#endif 6974#endif
6944 read_lock(&tasklist_lock); 6975 read_lock(&tasklist_lock);
6945 do_each_thread(g, p) { 6976 do_each_thread(g, p) {
@@ -6986,6 +7017,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6986 raw_spin_lock_irqsave(&rq->lock, flags); 7017 raw_spin_lock_irqsave(&rq->lock, flags);
6987 7018
6988 __sched_fork(idle); 7019 __sched_fork(idle);
7020 idle->state = TASK_RUNNING;
6989 idle->se.exec_start = sched_clock(); 7021 idle->se.exec_start = sched_clock();
6990 7022
6991 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 7023 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -7100,7 +7132,23 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
7100 struct rq *rq; 7132 struct rq *rq;
7101 int ret = 0; 7133 int ret = 0;
7102 7134
7135 /*
7136 * Since we rely on wake-ups to migrate sleeping tasks, don't change
7137 * the ->cpus_allowed mask from under waking tasks, which would be
7138 * possible when we change rq->lock in ttwu(), so synchronize against
7139 * TASK_WAKING to avoid that.
7140 */
7141again:
7142 while (p->state == TASK_WAKING)
7143 cpu_relax();
7144
7103 rq = task_rq_lock(p, &flags); 7145 rq = task_rq_lock(p, &flags);
7146
7147 if (p->state == TASK_WAKING) {
7148 task_rq_unlock(rq, &flags);
7149 goto again;
7150 }
7151
7104 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 7152 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
7105 ret = -EINVAL; 7153 ret = -EINVAL;
7106 goto out; 7154 goto out;
@@ -7156,7 +7204,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
7156static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 7204static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7157{ 7205{
7158 struct rq *rq_dest, *rq_src; 7206 struct rq *rq_dest, *rq_src;
7159 int ret = 0, on_rq; 7207 int ret = 0;
7160 7208
7161 if (unlikely(!cpu_active(dest_cpu))) 7209 if (unlikely(!cpu_active(dest_cpu)))
7162 return ret; 7210 return ret;
@@ -7172,12 +7220,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7172 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 7220 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7173 goto fail; 7221 goto fail;
7174 7222
7175 on_rq = p->se.on_rq; 7223 /*
7176 if (on_rq) 7224 * If we're not on a rq, the next wake-up will ensure we're
7225 * placed properly.
7226 */
7227 if (p->se.on_rq) {
7177 deactivate_task(rq_src, p, 0); 7228 deactivate_task(rq_src, p, 0);
7178 7229 set_task_cpu(p, dest_cpu);
7179 set_task_cpu(p, dest_cpu);
7180 if (on_rq) {
7181 activate_task(rq_dest, p, 0); 7230 activate_task(rq_dest, p, 0);
7182 check_preempt_curr(rq_dest, p, 0); 7231 check_preempt_curr(rq_dest, p, 0);
7183 } 7232 }
@@ -7273,37 +7322,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
7273static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 7322static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
7274{ 7323{
7275 int dest_cpu; 7324 int dest_cpu;
7276 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
7277 7325
7278again: 7326again:
7279 /* Look for allowed, online CPU in same node. */ 7327 dest_cpu = select_fallback_rq(dead_cpu, p);
7280 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
7281 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7282 goto move;
7283
7284 /* Any allowed, online CPU? */
7285 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
7286 if (dest_cpu < nr_cpu_ids)
7287 goto move;
7288
7289 /* No more Mr. Nice Guy. */
7290 if (dest_cpu >= nr_cpu_ids) {
7291 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
7292 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
7293
7294 /*
7295 * Don't tell them about moving exiting tasks or
7296 * kernel threads (both mm NULL), since they never
7297 * leave kernel.
7298 */
7299 if (p->mm && printk_ratelimit()) {
7300 printk(KERN_INFO "process %d (%s) no "
7301 "longer affine to cpu%d\n",
7302 task_pid_nr(p), p->comm, dead_cpu);
7303 }
7304 }
7305 7328
7306move:
7307 /* It can have affinity changed while we were choosing. */ 7329 /* It can have affinity changed while we were choosing. */
7308 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 7330 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
7309 goto again; 7331 goto again;
@@ -7806,48 +7828,44 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
7806 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 7828 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
7807 7829
7808 if (!(sd->flags & SD_LOAD_BALANCE)) { 7830 if (!(sd->flags & SD_LOAD_BALANCE)) {
7809 printk("does not load-balance\n"); 7831 pr_cont("does not load-balance\n");
7810 if (sd->parent) 7832 if (sd->parent)
7811 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 7833 pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
7812 " has parent");
7813 return -1; 7834 return -1;
7814 } 7835 }
7815 7836
7816 printk(KERN_CONT "span %s level %s\n", str, sd->name); 7837 pr_cont("span %s level %s\n", str, sd->name);
7817 7838
7818 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 7839 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
7819 printk(KERN_ERR "ERROR: domain->span does not contain " 7840 pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
7820 "CPU%d\n", cpu);
7821 } 7841 }
7822 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 7842 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
7823 printk(KERN_ERR "ERROR: domain->groups does not contain" 7843 pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
7824 " CPU%d\n", cpu);
7825 } 7844 }
7826 7845
7827 printk(KERN_DEBUG "%*s groups:", level + 1, ""); 7846 printk(KERN_DEBUG "%*s groups:", level + 1, "");
7828 do { 7847 do {
7829 if (!group) { 7848 if (!group) {
7830 printk("\n"); 7849 pr_cont("\n");
7831 printk(KERN_ERR "ERROR: group is NULL\n"); 7850 pr_err("ERROR: group is NULL\n");
7832 break; 7851 break;
7833 } 7852 }
7834 7853
7835 if (!group->cpu_power) { 7854 if (!group->cpu_power) {
7836 printk(KERN_CONT "\n"); 7855 pr_cont("\n");
7837 printk(KERN_ERR "ERROR: domain->cpu_power not " 7856 pr_err("ERROR: domain->cpu_power not set\n");
7838 "set\n");
7839 break; 7857 break;
7840 } 7858 }
7841 7859
7842 if (!cpumask_weight(sched_group_cpus(group))) { 7860 if (!cpumask_weight(sched_group_cpus(group))) {
7843 printk(KERN_CONT "\n"); 7861 pr_cont("\n");
7844 printk(KERN_ERR "ERROR: empty group\n"); 7862 pr_err("ERROR: empty group\n");
7845 break; 7863 break;
7846 } 7864 }
7847 7865
7848 if (cpumask_intersects(groupmask, sched_group_cpus(group))) { 7866 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
7849 printk(KERN_CONT "\n"); 7867 pr_cont("\n");
7850 printk(KERN_ERR "ERROR: repeated CPUs\n"); 7868 pr_err("ERROR: repeated CPUs\n");
7851 break; 7869 break;
7852 } 7870 }
7853 7871
@@ -7855,23 +7873,21 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
7855 7873
7856 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); 7874 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
7857 7875
7858 printk(KERN_CONT " %s", str); 7876 pr_cont(" %s", str);
7859 if (group->cpu_power != SCHED_LOAD_SCALE) { 7877 if (group->cpu_power != SCHED_LOAD_SCALE) {
7860 printk(KERN_CONT " (cpu_power = %d)", 7878 pr_cont(" (cpu_power = %d)", group->cpu_power);
7861 group->cpu_power);
7862 } 7879 }
7863 7880
7864 group = group->next; 7881 group = group->next;
7865 } while (group != sd->groups); 7882 } while (group != sd->groups);
7866 printk(KERN_CONT "\n"); 7883 pr_cont("\n");
7867 7884
7868 if (!cpumask_equal(sched_domain_span(sd), groupmask)) 7885 if (!cpumask_equal(sched_domain_span(sd), groupmask))
7869 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 7886 pr_err("ERROR: groups don't span domain->span\n");
7870 7887
7871 if (sd->parent && 7888 if (sd->parent &&
7872 !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 7889 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
7873 printk(KERN_ERR "ERROR: parent span is not a superset " 7890 pr_err("ERROR: parent span is not a superset of domain->span\n");
7874 "of domain->span\n");
7875 return 0; 7891 return 0;
7876} 7892}
7877 7893
@@ -8427,8 +8443,7 @@ static int build_numa_sched_groups(struct s_data *d,
8427 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), 8443 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8428 GFP_KERNEL, num); 8444 GFP_KERNEL, num);
8429 if (!sg) { 8445 if (!sg) {
8430 printk(KERN_WARNING "Can not alloc domain group for node %d\n", 8446 pr_warning("Can not alloc domain group for node %d\n", num);
8431 num);
8432 return -ENOMEM; 8447 return -ENOMEM;
8433 } 8448 }
8434 d->sched_group_nodes[num] = sg; 8449 d->sched_group_nodes[num] = sg;
@@ -8457,8 +8472,8 @@ static int build_numa_sched_groups(struct s_data *d,
8457 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), 8472 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8458 GFP_KERNEL, num); 8473 GFP_KERNEL, num);
8459 if (!sg) { 8474 if (!sg) {
8460 printk(KERN_WARNING 8475 pr_warning("Can not alloc domain group for node %d\n",
8461 "Can not alloc domain group for node %d\n", j); 8476 j);
8462 return -ENOMEM; 8477 return -ENOMEM;
8463 } 8478 }
8464 sg->cpu_power = 0; 8479 sg->cpu_power = 0;
@@ -8686,7 +8701,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
8686 d->sched_group_nodes = kcalloc(nr_node_ids, 8701 d->sched_group_nodes = kcalloc(nr_node_ids,
8687 sizeof(struct sched_group *), GFP_KERNEL); 8702 sizeof(struct sched_group *), GFP_KERNEL);
8688 if (!d->sched_group_nodes) { 8703 if (!d->sched_group_nodes) {
8689 printk(KERN_WARNING "Can not alloc sched group node list\n"); 8704 pr_warning("Can not alloc sched group node list\n");
8690 return sa_notcovered; 8705 return sa_notcovered;
8691 } 8706 }
8692 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; 8707 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8703,7 +8718,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
8703 return sa_send_covered; 8718 return sa_send_covered;
8704 d->rd = alloc_rootdomain(); 8719 d->rd = alloc_rootdomain();
8705 if (!d->rd) { 8720 if (!d->rd) {
8706 printk(KERN_WARNING "Cannot alloc root domain\n"); 8721 pr_warning("Cannot alloc root domain\n");
8707 return sa_tmpmask; 8722 return sa_tmpmask;
8708 } 8723 }
8709 return sa_rootdomain; 8724 return sa_rootdomain;
@@ -9668,7 +9683,7 @@ void __init sched_init(void)
9668#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 9683#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
9669static inline int preempt_count_equals(int preempt_offset) 9684static inline int preempt_count_equals(int preempt_offset)
9670{ 9685{
9671 int nested = preempt_count() & ~PREEMPT_ACTIVE; 9686 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
9672 9687
9673 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); 9688 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
9674} 9689}
@@ -9685,13 +9700,11 @@ void __might_sleep(char *file, int line, int preempt_offset)
9685 return; 9700 return;
9686 prev_jiffy = jiffies; 9701 prev_jiffy = jiffies;
9687 9702
9688 printk(KERN_ERR 9703 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
9689 "BUG: sleeping function called from invalid context at %s:%d\n", 9704 file, line);
9690 file, line); 9705 pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
9691 printk(KERN_ERR 9706 in_atomic(), irqs_disabled(),
9692 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 9707 current->pid, current->comm);
9693 in_atomic(), irqs_disabled(),
9694 current->pid, current->comm);
9695 9708
9696 debug_show_held_locks(current); 9709 debug_show_held_locks(current);
9697 if (irqs_disabled()) 9710 if (irqs_disabled())
@@ -10083,7 +10096,7 @@ void sched_move_task(struct task_struct *tsk)
10083 10096
10084#ifdef CONFIG_FAIR_GROUP_SCHED 10097#ifdef CONFIG_FAIR_GROUP_SCHED
10085 if (tsk->sched_class->moved_group) 10098 if (tsk->sched_class->moved_group)
10086 tsk->sched_class->moved_group(tsk); 10099 tsk->sched_class->moved_group(tsk, on_rq);
10087#endif 10100#endif
10088 10101
10089 if (unlikely(running)) 10102 if (unlikely(running))
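Among the sched.c cleanups above, try_wait_for_completion() and completion_done() switch from spin_lock_irq() to spin_lock_irqsave() so they can be called with interrupts already disabled (spin_unlock_irq() would otherwise re-enable them unconditionally). A condensed sketch of that pattern, with peek_done() as an illustrative name rather than a kernel function:

#include <linux/spinlock.h>
#include <linux/completion.h>

static bool peek_done(struct completion *x)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&x->wait.lock, flags);	/* safe in any context */
	ret = x->done != 0;
	spin_unlock_irqrestore(&x->wait.lock, flags);	/* restores caller's IRQ state */

	return ret;
}
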
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 479ce5682d7c..5b496132c28a 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -236,6 +236,18 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
236} 236}
237EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 237EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
238 238
239unsigned long long cpu_clock(int cpu)
240{
241 unsigned long long clock;
242 unsigned long flags;
243
244 local_irq_save(flags);
245 clock = sched_clock_cpu(cpu);
246 local_irq_restore(flags);
247
248 return clock;
249}
250
239#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 251#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
240 252
241void sched_clock_init(void) 253void sched_clock_init(void)
@@ -251,17 +263,12 @@ u64 sched_clock_cpu(int cpu)
251 return sched_clock(); 263 return sched_clock();
252} 264}
253 265
254#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
255 266
256unsigned long long cpu_clock(int cpu) 267unsigned long long cpu_clock(int cpu)
257{ 268{
258 unsigned long long clock; 269 return sched_clock_cpu(cpu);
259 unsigned long flags; 270}
260 271
261 local_irq_save(flags); 272#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
262 clock = sched_clock_cpu(cpu);
263 local_irq_restore(flags);
264 273
265 return clock;
266}
267EXPORT_SYMBOL_GPL(cpu_clock); 274EXPORT_SYMBOL_GPL(cpu_clock);
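After the sched_clock.c move, cpu_clock() has one definition per configuration: the unstable-clock variant disables local interrupts around sched_clock_cpu(), while the stable variant is a direct call. Condensed from the hunks above for illustration:

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);		/* keep sched_clock_cpu() on this CPU, un-nested */
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
#else
unsigned long long cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);	/* stable clock: no IRQ dance needed */
}
#endif
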
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5bedf6e3ebf3..42ac3c9f66f6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -510,6 +510,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
510 curr->sum_exec_runtime += delta_exec; 510 curr->sum_exec_runtime += delta_exec;
511 schedstat_add(cfs_rq, exec_clock, delta_exec); 511 schedstat_add(cfs_rq, exec_clock, delta_exec);
512 delta_exec_weighted = calc_delta_fair(delta_exec, curr); 512 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
513
513 curr->vruntime += delta_exec_weighted; 514 curr->vruntime += delta_exec_weighted;
514 update_min_vruntime(cfs_rq); 515 update_min_vruntime(cfs_rq);
515} 516}
@@ -765,16 +766,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
765 se->vruntime = vruntime; 766 se->vruntime = vruntime;
766} 767}
767 768
769#define ENQUEUE_WAKEUP 1
770#define ENQUEUE_MIGRATE 2
771
768static void 772static void
769enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup) 773enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
770{ 774{
771 /* 775 /*
776 * Update the normalized vruntime before updating min_vruntime
777 * through callig update_curr().
778 */
779 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
780 se->vruntime += cfs_rq->min_vruntime;
781
782 /*
772 * Update run-time statistics of the 'current'. 783 * Update run-time statistics of the 'current'.
773 */ 784 */
774 update_curr(cfs_rq); 785 update_curr(cfs_rq);
775 account_entity_enqueue(cfs_rq, se); 786 account_entity_enqueue(cfs_rq, se);
776 787
777 if (wakeup) { 788 if (flags & ENQUEUE_WAKEUP) {
778 place_entity(cfs_rq, se, 0); 789 place_entity(cfs_rq, se, 0);
779 enqueue_sleeper(cfs_rq, se); 790 enqueue_sleeper(cfs_rq, se);
780 } 791 }
@@ -828,6 +839,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
828 __dequeue_entity(cfs_rq, se); 839 __dequeue_entity(cfs_rq, se);
829 account_entity_dequeue(cfs_rq, se); 840 account_entity_dequeue(cfs_rq, se);
830 update_min_vruntime(cfs_rq); 841 update_min_vruntime(cfs_rq);
842
843 /*
844 * Normalize the entity after updating the min_vruntime because the
845 * update can refer to the ->curr item and we need to reflect this
846 * movement in our normalized position.
847 */
848 if (!sleep)
849 se->vruntime -= cfs_rq->min_vruntime;
831} 850}
832 851
833/* 852/*
@@ -1038,13 +1057,19 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
1038{ 1057{
1039 struct cfs_rq *cfs_rq; 1058 struct cfs_rq *cfs_rq;
1040 struct sched_entity *se = &p->se; 1059 struct sched_entity *se = &p->se;
1060 int flags = 0;
1061
1062 if (wakeup)
1063 flags |= ENQUEUE_WAKEUP;
1064 if (p->state == TASK_WAKING)
1065 flags |= ENQUEUE_MIGRATE;
1041 1066
1042 for_each_sched_entity(se) { 1067 for_each_sched_entity(se) {
1043 if (se->on_rq) 1068 if (se->on_rq)
1044 break; 1069 break;
1045 cfs_rq = cfs_rq_of(se); 1070 cfs_rq = cfs_rq_of(se);
1046 enqueue_entity(cfs_rq, se, wakeup); 1071 enqueue_entity(cfs_rq, se, flags);
1047 wakeup = 1; 1072 flags = ENQUEUE_WAKEUP;
1048 } 1073 }
1049 1074
1050 hrtick_update(rq); 1075 hrtick_update(rq);
@@ -1120,6 +1145,14 @@ static void yield_task_fair(struct rq *rq)
1120 1145
1121#ifdef CONFIG_SMP 1146#ifdef CONFIG_SMP
1122 1147
1148static void task_waking_fair(struct rq *rq, struct task_struct *p)
1149{
1150 struct sched_entity *se = &p->se;
1151 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1152
1153 se->vruntime -= cfs_rq->min_vruntime;
1154}
1155
1123#ifdef CONFIG_FAIR_GROUP_SCHED 1156#ifdef CONFIG_FAIR_GROUP_SCHED
1124/* 1157/*
1125 * effective_load() calculates the load change as seen from the root_task_group 1158 * effective_load() calculates the load change as seen from the root_task_group
@@ -1429,6 +1462,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1429 } 1462 }
1430 1463
1431 for_each_domain(cpu, tmp) { 1464 for_each_domain(cpu, tmp) {
1465 if (!(tmp->flags & SD_LOAD_BALANCE))
1466 continue;
1467
1432 /* 1468 /*
1433 * If power savings logic is enabled for a domain, see if we 1469 * If power savings logic is enabled for a domain, see if we
1434 * are not overloaded, if so, don't balance wider. 1470 * are not overloaded, if so, don't balance wider.
@@ -1975,6 +2011,8 @@ static void task_fork_fair(struct task_struct *p)
1975 resched_task(rq->curr); 2011 resched_task(rq->curr);
1976 } 2012 }
1977 2013
2014 se->vruntime -= cfs_rq->min_vruntime;
2015
1978 raw_spin_unlock_irqrestore(&rq->lock, flags); 2016 raw_spin_unlock_irqrestore(&rq->lock, flags);
1979} 2017}
1980 2018
@@ -2028,12 +2066,13 @@ static void set_curr_task_fair(struct rq *rq)
2028} 2066}
2029 2067
2030#ifdef CONFIG_FAIR_GROUP_SCHED 2068#ifdef CONFIG_FAIR_GROUP_SCHED
2031static void moved_group_fair(struct task_struct *p) 2069static void moved_group_fair(struct task_struct *p, int on_rq)
2032{ 2070{
2033 struct cfs_rq *cfs_rq = task_cfs_rq(p); 2071 struct cfs_rq *cfs_rq = task_cfs_rq(p);
2034 2072
2035 update_curr(cfs_rq); 2073 update_curr(cfs_rq);
2036 place_entity(cfs_rq, &p->se, 1); 2074 if (!on_rq)
2075 place_entity(cfs_rq, &p->se, 1);
2037} 2076}
2038#endif 2077#endif
2039 2078
@@ -2073,6 +2112,8 @@ static const struct sched_class fair_sched_class = {
2073 .move_one_task = move_one_task_fair, 2112 .move_one_task = move_one_task_fair,
2074 .rq_online = rq_online_fair, 2113 .rq_online = rq_online_fair,
2075 .rq_offline = rq_offline_fair, 2114 .rq_offline = rq_offline_fair,
2115
2116 .task_waking = task_waking_fair,
2076#endif 2117#endif
2077 2118
2078 .set_curr_task = set_curr_task_fair, 2119 .set_curr_task = set_curr_task_fair,
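The fair-class changes make vruntime portable across runqueues: dequeue (and the new task_waking_fair()) subtracts the source cfs_rq's min_vruntime, and enqueue with ENQUEUE_MIGRATE adds the destination's back, so only the task's relative lag travels with it. A userspace sketch of that normalization, with fake_cfs_rq/fake_se as illustrative types:

#include <stdio.h>

struct fake_cfs_rq { unsigned long long min_vruntime; };
struct fake_se     { unsigned long long vruntime; };

static void dequeue_migrate(struct fake_cfs_rq *rq, struct fake_se *se)
{
	se->vruntime -= rq->min_vruntime;	/* keep only the relative lag */
}

static void enqueue_migrate(struct fake_cfs_rq *rq, struct fake_se *se)
{
	se->vruntime += rq->min_vruntime;	/* re-base on the new queue */
}

int main(void)
{
	struct fake_cfs_rq src = { .min_vruntime = 1000 };
	struct fake_cfs_rq dst = { .min_vruntime =   40 };
	struct fake_se se      = { .vruntime     = 1025 };	/* 25 ahead of src */

	dequeue_migrate(&src, &se);
	enqueue_migrate(&dst, &se);

	/* prints 65: still 25 ahead of the destination's min_vruntime */
	printf("vruntime on dst: %llu\n", se.vruntime);
	return 0;
}
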
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5f93b570d383..21b969a28725 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -35,7 +35,7 @@ static void
35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
36{ 36{
37 raw_spin_unlock_irq(&rq->lock); 37 raw_spin_unlock_irq(&rq->lock);
38 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 38 pr_err("bad: scheduling from the idle thread!\n");
39 dump_stack(); 39 dump_stack();
40 raw_spin_lock_irq(&rq->lock); 40 raw_spin_lock_irq(&rq->lock);
41} 41}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d2ea2828164e..f48328ac216f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1472,7 +1472,7 @@ static void post_schedule_rt(struct rq *rq)
1472 * If we are not running and we are not going to reschedule soon, we should 1472 * If we are not running and we are not going to reschedule soon, we should
1473 * try to push tasks away now 1473 * try to push tasks away now
1474 */ 1474 */
1475static void task_wake_up_rt(struct rq *rq, struct task_struct *p) 1475static void task_woken_rt(struct rq *rq, struct task_struct *p)
1476{ 1476{
1477 if (!task_running(rq, p) && 1477 if (!task_running(rq, p) &&
1478 !test_tsk_need_resched(rq->curr) && 1478 !test_tsk_need_resched(rq->curr) &&
@@ -1753,7 +1753,7 @@ static const struct sched_class rt_sched_class = {
1753 .rq_offline = rq_offline_rt, 1753 .rq_offline = rq_offline_rt,
1754 .pre_schedule = pre_schedule_rt, 1754 .pre_schedule = pre_schedule_rt,
1755 .post_schedule = post_schedule_rt, 1755 .post_schedule = post_schedule_rt,
1756 .task_wake_up = task_wake_up_rt, 1756 .task_woken = task_woken_rt,
1757 .switched_from = switched_from_rt, 1757 .switched_from = switched_from_rt,
1758#endif 1758#endif
1759 1759