path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   726
1 file changed, 263 insertions(+), 463 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5cd607ec8405..1d93cd0ae4d3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,9 +55,9 @@
55#include <linux/cpu.h> 55#include <linux/cpu.h>
56#include <linux/cpuset.h> 56#include <linux/cpuset.h>
57#include <linux/percpu.h> 57#include <linux/percpu.h>
58#include <linux/kthread.h>
59#include <linux/proc_fs.h> 58#include <linux/proc_fs.h>
60#include <linux/seq_file.h> 59#include <linux/seq_file.h>
60#include <linux/stop_machine.h>
61#include <linux/sysctl.h> 61#include <linux/sysctl.h>
62#include <linux/syscalls.h> 62#include <linux/syscalls.h>
63#include <linux/times.h> 63#include <linux/times.h>
@@ -503,8 +503,11 @@ struct rq {
503 #define CPU_LOAD_IDX_MAX 5 503 #define CPU_LOAD_IDX_MAX 5
504 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 504 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
505#ifdef CONFIG_NO_HZ 505#ifdef CONFIG_NO_HZ
506 u64 nohz_stamp;
506 unsigned char in_nohz_recently; 507 unsigned char in_nohz_recently;
507#endif 508#endif
509 unsigned int skip_clock_update;
510
508 /* capture load from *all* tasks on this cpu: */ 511 /* capture load from *all* tasks on this cpu: */
509 struct load_weight load; 512 struct load_weight load;
510 unsigned long nr_load_updates; 513 unsigned long nr_load_updates;
@@ -546,15 +549,13 @@ struct rq {
546 int post_schedule; 549 int post_schedule;
547 int active_balance; 550 int active_balance;
548 int push_cpu; 551 int push_cpu;
552 struct cpu_stop_work active_balance_work;
549 /* cpu of this runqueue: */ 553 /* cpu of this runqueue: */
550 int cpu; 554 int cpu;
551 int online; 555 int online;
552 556
553 unsigned long avg_load_per_task; 557 unsigned long avg_load_per_task;
554 558
555 struct task_struct *migration_thread;
556 struct list_head migration_queue;
557
558 u64 rt_avg; 559 u64 rt_avg;
559 u64 age_stamp; 560 u64 age_stamp;
560 u64 idle_stamp; 561 u64 idle_stamp;
@@ -602,6 +603,13 @@ static inline
602void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 603void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
603{ 604{
604 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 605 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
606
607 /*
608 * A queue event has occurred, and we're going to schedule. In
609 * this case, we can save a useless back to back clock update.
610 */
611 if (test_tsk_need_resched(p))
612 rq->skip_clock_update = 1;
605} 613}
606 614
607static inline int cpu_of(struct rq *rq) 615static inline int cpu_of(struct rq *rq)
@@ -636,7 +644,8 @@ static inline int cpu_of(struct rq *rq)
636 644
637inline void update_rq_clock(struct rq *rq) 645inline void update_rq_clock(struct rq *rq)
638{ 646{
639 rq->clock = sched_clock_cpu(cpu_of(rq)); 647 if (!rq->skip_clock_update)
648 rq->clock = sched_clock_cpu(cpu_of(rq));
640} 649}
641 650
642/* 651/*
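Note on the two hunks above: check_preempt_curr() now sets rq->skip_clock_update when the woken task has already forced a resched on the current task, and update_rq_clock() honours the flag, so the clock is not recomputed twice back to back across a wakeup followed immediately by schedule(). The flag is cleared again in put_prev_task() (see that hunk further down). A minimal stand-alone model of the idea, with invented names and a fake clock, assuming nothing beyond what the hunks show:

    /* Simplified user-space model of skip_clock_update; not kernel code. */
    #include <stdio.h>

    struct rq_model {
        unsigned long long clock;
        int skip_clock_update;
    };

    static unsigned long long fake_sched_clock;    /* stands in for sched_clock_cpu() */

    static void update_rq_clock(struct rq_model *rq)
    {
        if (!rq->skip_clock_update)
            rq->clock = fake_sched_clock;
    }

    static void check_preempt_curr(struct rq_model *rq, int resched_pending)
    {
        if (resched_pending)               /* a queue event will reschedule shortly */
            rq->skip_clock_update = 1;     /* skip the useless back-to-back update */
    }

    static void put_prev_task(struct rq_model *rq)
    {
        rq->skip_clock_update = 0;         /* schedule() has run; allow updates again */
    }

    int main(void)
    {
        struct rq_model rq = { 0, 0 };

        fake_sched_clock = 1000; update_rq_clock(&rq);   /* clock -> 1000 */
        check_preempt_curr(&rq, 1);                      /* wakeup forced a resched */
        fake_sched_clock = 1005; update_rq_clock(&rq);   /* skipped, clock stays 1000 */
        put_prev_task(&rq);
        fake_sched_clock = 1010; update_rq_clock(&rq);   /* clock -> 1010 */
        printf("clock=%llu\n", rq.clock);
        return 0;
    }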
@@ -914,16 +923,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
914#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ 923#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
915 924
916/* 925/*
917 * Check whether the task is waking, we use this to synchronize against 926 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
918 * ttwu() so that task_cpu() reports a stable number. 927 * against ttwu().
919 *
920 * We need to make an exception for PF_STARTING tasks because the fork
921 * path might require task_rq_lock() to work, eg. it can call
922 * set_cpus_allowed_ptr() from the cpuset clone_ns code.
923 */ 928 */
924static inline int task_is_waking(struct task_struct *p) 929static inline int task_is_waking(struct task_struct *p)
925{ 930{
926 return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING)); 931 return unlikely(p->state == TASK_WAKING);
927} 932}
928 933
929/* 934/*
@@ -936,11 +941,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
936 struct rq *rq; 941 struct rq *rq;
937 942
938 for (;;) { 943 for (;;) {
939 while (task_is_waking(p))
940 cpu_relax();
941 rq = task_rq(p); 944 rq = task_rq(p);
942 raw_spin_lock(&rq->lock); 945 raw_spin_lock(&rq->lock);
943 if (likely(rq == task_rq(p) && !task_is_waking(p))) 946 if (likely(rq == task_rq(p)))
944 return rq; 947 return rq;
945 raw_spin_unlock(&rq->lock); 948 raw_spin_unlock(&rq->lock);
946 } 949 }
@@ -957,12 +960,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
957 struct rq *rq; 960 struct rq *rq;
958 961
959 for (;;) { 962 for (;;) {
960 while (task_is_waking(p))
961 cpu_relax();
962 local_irq_save(*flags); 963 local_irq_save(*flags);
963 rq = task_rq(p); 964 rq = task_rq(p);
964 raw_spin_lock(&rq->lock); 965 raw_spin_lock(&rq->lock);
965 if (likely(rq == task_rq(p) && !task_is_waking(p))) 966 if (likely(rq == task_rq(p)))
966 return rq; 967 return rq;
967 raw_spin_unlock_irqrestore(&rq->lock, *flags); 968 raw_spin_unlock_irqrestore(&rq->lock, *flags);
968 } 969 }
@@ -1239,6 +1240,17 @@ void wake_up_idle_cpu(int cpu)
1239 if (!tsk_is_polling(rq->idle)) 1240 if (!tsk_is_polling(rq->idle))
1240 smp_send_reschedule(cpu); 1241 smp_send_reschedule(cpu);
1241} 1242}
1243
1244int nohz_ratelimit(int cpu)
1245{
1246 struct rq *rq = cpu_rq(cpu);
1247 u64 diff = rq->clock - rq->nohz_stamp;
1248
1249 rq->nohz_stamp = rq->clock;
1250
1251 return diff < (NSEC_PER_SEC / HZ) >> 1;
1252}
1253
1242#endif /* CONFIG_NO_HZ */ 1254#endif /* CONFIG_NO_HZ */
1243 1255
1244static u64 sched_avg_period(void) 1256static u64 sched_avg_period(void)
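nohz_ratelimit() above returns true when less than half a tick, (NSEC_PER_SEC / HZ) >> 1, has passed since the previous call for that CPU (500 microseconds at HZ=1000), stamping rq->nohz_stamp on every call. The nohz idle-entry code (in kernel/time/tick-sched.c, not part of this diff) is assumed to use it to keep the periodic tick rather than repeatedly reprogramming the timer when a CPU bounces in and out of idle. A stand-alone model of the check, with illustrative clock values:

    /* Stand-alone model of the rate-limit test; HZ and the clock values
     * are illustrative, not taken from any particular kernel build. */
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL
    #define HZ 1000

    static unsigned long long nohz_stamp;

    static int nohz_ratelimit_model(unsigned long long clock)
    {
        unsigned long long diff = clock - nohz_stamp;

        nohz_stamp = clock;
        return diff < (NSEC_PER_SEC / HZ) >> 1;   /* less than half a tick, 500000 ns */
    }

    int main(void)
    {
        printf("%d\n", nohz_ratelimit_model(1000000));   /* first call: big diff -> 0 */
        printf("%d\n", nohz_ratelimit_model(1200000));   /* 200 us later -> 1 (ratelimited) */
        printf("%d\n", nohz_ratelimit_model(2000000));   /* 800 us later -> 0 */
        return 0;
    }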
@@ -1781,8 +1793,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1781 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 1793 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1782 } 1794 }
1783 } 1795 }
1784 update_rq_clock(rq1);
1785 update_rq_clock(rq2);
1786} 1796}
1787 1797
1788/* 1798/*
@@ -1813,7 +1823,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1813} 1823}
1814#endif 1824#endif
1815 1825
1816static void calc_load_account_active(struct rq *this_rq); 1826static void calc_load_account_idle(struct rq *this_rq);
1817static void update_sysctl(void); 1827static void update_sysctl(void);
1818static int get_update_sysctl_factor(void); 1828static int get_update_sysctl_factor(void);
1819 1829
@@ -1870,62 +1880,43 @@ static void set_load_weight(struct task_struct *p)
1870 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; 1880 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
1871} 1881}
1872 1882
1873static void update_avg(u64 *avg, u64 sample) 1883static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1874{ 1884{
1875 s64 diff = sample - *avg; 1885 update_rq_clock(rq);
1876 *avg += diff >> 3;
1877}
1878
1879static void
1880enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
1881{
1882 if (wakeup)
1883 p->se.start_runtime = p->se.sum_exec_runtime;
1884
1885 sched_info_queued(p); 1886 sched_info_queued(p);
1886 p->sched_class->enqueue_task(rq, p, wakeup, head); 1887 p->sched_class->enqueue_task(rq, p, flags);
1887 p->se.on_rq = 1; 1888 p->se.on_rq = 1;
1888} 1889}
1889 1890
1890static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1891static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1891{ 1892{
1892 if (sleep) { 1893 update_rq_clock(rq);
1893 if (p->se.last_wakeup) {
1894 update_avg(&p->se.avg_overlap,
1895 p->se.sum_exec_runtime - p->se.last_wakeup);
1896 p->se.last_wakeup = 0;
1897 } else {
1898 update_avg(&p->se.avg_wakeup,
1899 sysctl_sched_wakeup_granularity);
1900 }
1901 }
1902
1903 sched_info_dequeued(p); 1894 sched_info_dequeued(p);
1904 p->sched_class->dequeue_task(rq, p, sleep); 1895 p->sched_class->dequeue_task(rq, p, flags);
1905 p->se.on_rq = 0; 1896 p->se.on_rq = 0;
1906} 1897}
1907 1898
1908/* 1899/*
1909 * activate_task - move a task to the runqueue. 1900 * activate_task - move a task to the runqueue.
1910 */ 1901 */
1911static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) 1902static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1912{ 1903{
1913 if (task_contributes_to_load(p)) 1904 if (task_contributes_to_load(p))
1914 rq->nr_uninterruptible--; 1905 rq->nr_uninterruptible--;
1915 1906
1916 enqueue_task(rq, p, wakeup, false); 1907 enqueue_task(rq, p, flags);
1917 inc_nr_running(rq); 1908 inc_nr_running(rq);
1918} 1909}
1919 1910
1920/* 1911/*
1921 * deactivate_task - remove a task from the runqueue. 1912 * deactivate_task - remove a task from the runqueue.
1922 */ 1913 */
1923static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) 1914static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1924{ 1915{
1925 if (task_contributes_to_load(p)) 1916 if (task_contributes_to_load(p))
1926 rq->nr_uninterruptible++; 1917 rq->nr_uninterruptible++;
1927 1918
1928 dequeue_task(rq, p, sleep); 1919 dequeue_task(rq, p, flags);
1929 dec_nr_running(rq); 1920 dec_nr_running(rq);
1930} 1921}
1931 1922
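From this hunk on, enqueue_task()/dequeue_task() take a single flags word instead of the old wakeup/sleep booleans and the separate 'head' argument, and both helpers update the rq clock themselves so callers no longer have to. The flag names used elsewhere in this diff are ENQUEUE_WAKEUP, ENQUEUE_WAKING, ENQUEUE_HEAD and DEQUEUE_SLEEP; the bit values below are purely illustrative, the real definitions live in the scheduler headers:

    /* Illustrative layout only -- not copied from sched.h */
    #define ENQUEUE_WAKEUP  0x01   /* task is being woken up */
    #define ENQUEUE_WAKING  0x02   /* ->task_waking() ran, placement is relative */
    #define ENQUEUE_HEAD    0x04   /* queue at the head, e.g. on priority boost */
    #define DEQUEUE_SLEEP   0x01   /* task is going to sleep, not being migrated */

    /* e.g. the rt_mutex_setprio() hunk later in this diff: */
    enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);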
@@ -2054,21 +2045,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2054 __set_task_cpu(p, new_cpu); 2045 __set_task_cpu(p, new_cpu);
2055} 2046}
2056 2047
2057struct migration_req { 2048struct migration_arg {
2058 struct list_head list;
2059
2060 struct task_struct *task; 2049 struct task_struct *task;
2061 int dest_cpu; 2050 int dest_cpu;
2062
2063 struct completion done;
2064}; 2051};
2065 2052
2053static int migration_cpu_stop(void *data);
2054
2066/* 2055/*
2067 * The task's runqueue lock must be held. 2056 * The task's runqueue lock must be held.
2068 * Returns true if you have to wait for migration thread. 2057 * Returns true if you have to wait for migration thread.
2069 */ 2058 */
2070static int 2059static bool migrate_task(struct task_struct *p, int dest_cpu)
2071migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2072{ 2060{
2073 struct rq *rq = task_rq(p); 2061 struct rq *rq = task_rq(p);
2074 2062
@@ -2076,15 +2064,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2076 * If the task is not on a runqueue (and not running), then 2064 * If the task is not on a runqueue (and not running), then
2077 * the next wake-up will properly place the task. 2065 * the next wake-up will properly place the task.
2078 */ 2066 */
2079 if (!p->se.on_rq && !task_running(rq, p)) 2067 return p->se.on_rq || task_running(rq, p);
2080 return 0;
2081
2082 init_completion(&req->done);
2083 req->task = p;
2084 req->dest_cpu = dest_cpu;
2085 list_add(&req->list, &rq->migration_queue);
2086
2087 return 1;
2088} 2068}
2089 2069
2090/* 2070/*
@@ -2142,7 +2122,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2142 * just go back and repeat. 2122 * just go back and repeat.
2143 */ 2123 */
2144 rq = task_rq_lock(p, &flags); 2124 rq = task_rq_lock(p, &flags);
2145 trace_sched_wait_task(rq, p); 2125 trace_sched_wait_task(p);
2146 running = task_running(rq, p); 2126 running = task_running(rq, p);
2147 on_rq = p->se.on_rq; 2127 on_rq = p->se.on_rq;
2148 ncsw = 0; 2128 ncsw = 0;
@@ -2240,6 +2220,9 @@ void task_oncpu_function_call(struct task_struct *p,
2240} 2220}
2241 2221
2242#ifdef CONFIG_SMP 2222#ifdef CONFIG_SMP
2223/*
2224 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2225 */
2243static int select_fallback_rq(int cpu, struct task_struct *p) 2226static int select_fallback_rq(int cpu, struct task_struct *p)
2244{ 2227{
2245 int dest_cpu; 2228 int dest_cpu;
@@ -2256,12 +2239,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2256 return dest_cpu; 2239 return dest_cpu;
2257 2240
2258 /* No more Mr. Nice Guy. */ 2241 /* No more Mr. Nice Guy. */
2259 if (dest_cpu >= nr_cpu_ids) { 2242 if (unlikely(dest_cpu >= nr_cpu_ids)) {
2260 rcu_read_lock(); 2243 dest_cpu = cpuset_cpus_allowed_fallback(p);
2261 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2262 rcu_read_unlock();
2263 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2264
2265 /* 2244 /*
2266 * Don't tell them about moving exiting tasks or 2245 * Don't tell them about moving exiting tasks or
2267 * kernel threads (both mm NULL), since they never 2246 * kernel threads (both mm NULL), since they never
@@ -2278,17 +2257,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2278} 2257}
2279 2258
2280/* 2259/*
2281 * Gets called from 3 sites (exec, fork, wakeup), since it is called without 2260 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
2282 * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
2283 * by:
2284 *
2285 * exec: is unstable, retry loop
2286 * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
2287 */ 2261 */
2288static inline 2262static inline
2289int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 2263int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
2290{ 2264{
2291 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2265 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
2292 2266
2293 /* 2267 /*
2294 * In order not to call set_task_cpu() on a blocking task we need 2268 * In order not to call set_task_cpu() on a blocking task we need
@@ -2306,6 +2280,12 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2306 2280
2307 return cpu; 2281 return cpu;
2308} 2282}
2283
2284static void update_avg(u64 *avg, u64 sample)
2285{
2286 s64 diff = sample - *avg;
2287 *avg += diff >> 3;
2288}
2309#endif 2289#endif
2310 2290
2311/*** 2291/***
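update_avg(), moved here under CONFIG_SMP, is a fixed-point exponential moving average with weight 1/8: each sample pulls the stored average one eighth of the way towards itself, truncating in integer arithmetic. A short worked example:

    u64 avg = 0;
    update_avg(&avg, 800);   /* avg = 0   + (800 - 0)   / 8 = 100 */
    update_avg(&avg, 800);   /* avg = 100 + (800 - 100) / 8 = 187 */
    update_avg(&avg, 800);   /* avg = 187 + (800 - 187) / 8 = 263 */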
@@ -2327,16 +2307,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2327{ 2307{
2328 int cpu, orig_cpu, this_cpu, success = 0; 2308 int cpu, orig_cpu, this_cpu, success = 0;
2329 unsigned long flags; 2309 unsigned long flags;
2310 unsigned long en_flags = ENQUEUE_WAKEUP;
2330 struct rq *rq; 2311 struct rq *rq;
2331 2312
2332 if (!sched_feat(SYNC_WAKEUPS))
2333 wake_flags &= ~WF_SYNC;
2334
2335 this_cpu = get_cpu(); 2313 this_cpu = get_cpu();
2336 2314
2337 smp_wmb(); 2315 smp_wmb();
2338 rq = task_rq_lock(p, &flags); 2316 rq = task_rq_lock(p, &flags);
2339 update_rq_clock(rq);
2340 if (!(p->state & state)) 2317 if (!(p->state & state))
2341 goto out; 2318 goto out;
2342 2319
@@ -2356,28 +2333,26 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2356 * 2333 *
2357 * First fix up the nr_uninterruptible count: 2334 * First fix up the nr_uninterruptible count:
2358 */ 2335 */
2359 if (task_contributes_to_load(p)) 2336 if (task_contributes_to_load(p)) {
2360 rq->nr_uninterruptible--; 2337 if (likely(cpu_online(orig_cpu)))
2338 rq->nr_uninterruptible--;
2339 else
2340 this_rq()->nr_uninterruptible--;
2341 }
2361 p->state = TASK_WAKING; 2342 p->state = TASK_WAKING;
2362 2343
2363 if (p->sched_class->task_waking) 2344 if (p->sched_class->task_waking) {
2364 p->sched_class->task_waking(rq, p); 2345 p->sched_class->task_waking(rq, p);
2346 en_flags |= ENQUEUE_WAKING;
2347 }
2365 2348
2366 __task_rq_unlock(rq); 2349 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2367 2350 if (cpu != orig_cpu)
2368 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2369 if (cpu != orig_cpu) {
2370 /*
2371 * Since we migrate the task without holding any rq->lock,
2372 * we need to be careful with task_rq_lock(), since that
2373 * might end up locking an invalid rq.
2374 */
2375 set_task_cpu(p, cpu); 2351 set_task_cpu(p, cpu);
2376 } 2352 __task_rq_unlock(rq);
2377 2353
2378 rq = cpu_rq(cpu); 2354 rq = cpu_rq(cpu);
2379 raw_spin_lock(&rq->lock); 2355 raw_spin_lock(&rq->lock);
2380 update_rq_clock(rq);
2381 2356
2382 /* 2357 /*
2383 * We migrated the task without holding either rq->lock, however 2358 * We migrated the task without holding either rq->lock, however
@@ -2405,36 +2380,20 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2405 2380
2406out_activate: 2381out_activate:
2407#endif /* CONFIG_SMP */ 2382#endif /* CONFIG_SMP */
2408 schedstat_inc(p, se.nr_wakeups); 2383 schedstat_inc(p, se.statistics.nr_wakeups);
2409 if (wake_flags & WF_SYNC) 2384 if (wake_flags & WF_SYNC)
2410 schedstat_inc(p, se.nr_wakeups_sync); 2385 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2411 if (orig_cpu != cpu) 2386 if (orig_cpu != cpu)
2412 schedstat_inc(p, se.nr_wakeups_migrate); 2387 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2413 if (cpu == this_cpu) 2388 if (cpu == this_cpu)
2414 schedstat_inc(p, se.nr_wakeups_local); 2389 schedstat_inc(p, se.statistics.nr_wakeups_local);
2415 else 2390 else
2416 schedstat_inc(p, se.nr_wakeups_remote); 2391 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2417 activate_task(rq, p, 1); 2392 activate_task(rq, p, en_flags);
2418 success = 1; 2393 success = 1;
2419 2394
2420 /*
2421 * Only attribute actual wakeups done by this task.
2422 */
2423 if (!in_interrupt()) {
2424 struct sched_entity *se = &current->se;
2425 u64 sample = se->sum_exec_runtime;
2426
2427 if (se->last_wakeup)
2428 sample -= se->last_wakeup;
2429 else
2430 sample -= se->start_runtime;
2431 update_avg(&se->avg_wakeup, sample);
2432
2433 se->last_wakeup = se->sum_exec_runtime;
2434 }
2435
2436out_running: 2395out_running:
2437 trace_sched_wakeup(rq, p, success); 2396 trace_sched_wakeup(p, success);
2438 check_preempt_curr(rq, p, wake_flags); 2397 check_preempt_curr(rq, p, wake_flags);
2439 2398
2440 p->state = TASK_RUNNING; 2399 p->state = TASK_RUNNING;
@@ -2494,42 +2453,9 @@ static void __sched_fork(struct task_struct *p)
2494 p->se.sum_exec_runtime = 0; 2453 p->se.sum_exec_runtime = 0;
2495 p->se.prev_sum_exec_runtime = 0; 2454 p->se.prev_sum_exec_runtime = 0;
2496 p->se.nr_migrations = 0; 2455 p->se.nr_migrations = 0;
2497 p->se.last_wakeup = 0;
2498 p->se.avg_overlap = 0;
2499 p->se.start_runtime = 0;
2500 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2501 2456
2502#ifdef CONFIG_SCHEDSTATS 2457#ifdef CONFIG_SCHEDSTATS
2503 p->se.wait_start = 0; 2458 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2504 p->se.wait_max = 0;
2505 p->se.wait_count = 0;
2506 p->se.wait_sum = 0;
2507
2508 p->se.sleep_start = 0;
2509 p->se.sleep_max = 0;
2510 p->se.sum_sleep_runtime = 0;
2511
2512 p->se.block_start = 0;
2513 p->se.block_max = 0;
2514 p->se.exec_max = 0;
2515 p->se.slice_max = 0;
2516
2517 p->se.nr_migrations_cold = 0;
2518 p->se.nr_failed_migrations_affine = 0;
2519 p->se.nr_failed_migrations_running = 0;
2520 p->se.nr_failed_migrations_hot = 0;
2521 p->se.nr_forced_migrations = 0;
2522
2523 p->se.nr_wakeups = 0;
2524 p->se.nr_wakeups_sync = 0;
2525 p->se.nr_wakeups_migrate = 0;
2526 p->se.nr_wakeups_local = 0;
2527 p->se.nr_wakeups_remote = 0;
2528 p->se.nr_wakeups_affine = 0;
2529 p->se.nr_wakeups_affine_attempts = 0;
2530 p->se.nr_wakeups_passive = 0;
2531 p->se.nr_wakeups_idle = 0;
2532
2533#endif 2459#endif
2534 2460
2535 INIT_LIST_HEAD(&p->rt.run_list); 2461 INIT_LIST_HEAD(&p->rt.run_list);
@@ -2550,11 +2476,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
2550 2476
2551 __sched_fork(p); 2477 __sched_fork(p);
2552 /* 2478 /*
2553 * We mark the process as waking here. This guarantees that 2479 * We mark the process as running here. This guarantees that
2554 * nobody will actually run it, and a signal or other external 2480 * nobody will actually run it, and a signal or other external
2555 * event cannot wake it up and insert it on the runqueue either. 2481 * event cannot wake it up and insert it on the runqueue either.
2556 */ 2482 */
2557 p->state = TASK_WAKING; 2483 p->state = TASK_RUNNING;
2558 2484
2559 /* 2485 /*
2560 * Revert to default priority/policy on fork if requested. 2486 * Revert to default priority/policy on fork if requested.
@@ -2621,31 +2547,27 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2621 int cpu __maybe_unused = get_cpu(); 2547 int cpu __maybe_unused = get_cpu();
2622 2548
2623#ifdef CONFIG_SMP 2549#ifdef CONFIG_SMP
2550 rq = task_rq_lock(p, &flags);
2551 p->state = TASK_WAKING;
2552
2624 /* 2553 /*
2625 * Fork balancing, do it here and not earlier because: 2554 * Fork balancing, do it here and not earlier because:
2626 * - cpus_allowed can change in the fork path 2555 * - cpus_allowed can change in the fork path
2627 * - any previously selected cpu might disappear through hotplug 2556 * - any previously selected cpu might disappear through hotplug
2628 * 2557 *
2629 * We still have TASK_WAKING but PF_STARTING is gone now, meaning 2558 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2630 * ->cpus_allowed is stable, we have preemption disabled, meaning 2559 * without people poking at ->cpus_allowed.
2631 * cpu_online_mask is stable.
2632 */ 2560 */
2633 cpu = select_task_rq(p, SD_BALANCE_FORK, 0); 2561 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
2634 set_task_cpu(p, cpu); 2562 set_task_cpu(p, cpu);
2635#endif
2636 2563
2637 /*
2638 * Since the task is not on the rq and we still have TASK_WAKING set
2639 * nobody else will migrate this task.
2640 */
2641 rq = cpu_rq(cpu);
2642 raw_spin_lock_irqsave(&rq->lock, flags);
2643
2644 BUG_ON(p->state != TASK_WAKING);
2645 p->state = TASK_RUNNING; 2564 p->state = TASK_RUNNING;
2646 update_rq_clock(rq); 2565 task_rq_unlock(rq, &flags);
2566#endif
2567
2568 rq = task_rq_lock(p, &flags);
2647 activate_task(rq, p, 0); 2569 activate_task(rq, p, 0);
2648 trace_sched_wakeup_new(rq, p, 1); 2570 trace_sched_wakeup_new(p, 1);
2649 check_preempt_curr(rq, p, WF_FORK); 2571 check_preempt_curr(rq, p, WF_FORK);
2650#ifdef CONFIG_SMP 2572#ifdef CONFIG_SMP
2651 if (p->sched_class->task_woken) 2573 if (p->sched_class->task_woken)
@@ -2865,7 +2787,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2865 struct mm_struct *mm, *oldmm; 2787 struct mm_struct *mm, *oldmm;
2866 2788
2867 prepare_task_switch(rq, prev, next); 2789 prepare_task_switch(rq, prev, next);
2868 trace_sched_switch(rq, prev, next); 2790 trace_sched_switch(prev, next);
2869 mm = next->mm; 2791 mm = next->mm;
2870 oldmm = prev->active_mm; 2792 oldmm = prev->active_mm;
2871 /* 2793 /*
@@ -2982,6 +2904,61 @@ static unsigned long calc_load_update;
2982unsigned long avenrun[3]; 2904unsigned long avenrun[3];
2983EXPORT_SYMBOL(avenrun); 2905EXPORT_SYMBOL(avenrun);
2984 2906
2907static long calc_load_fold_active(struct rq *this_rq)
2908{
2909 long nr_active, delta = 0;
2910
2911 nr_active = this_rq->nr_running;
2912 nr_active += (long) this_rq->nr_uninterruptible;
2913
2914 if (nr_active != this_rq->calc_load_active) {
2915 delta = nr_active - this_rq->calc_load_active;
2916 this_rq->calc_load_active = nr_active;
2917 }
2918
2919 return delta;
2920}
2921
2922#ifdef CONFIG_NO_HZ
2923/*
2924 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2925 *
2926 * When making the ILB scale, we should try to pull this in as well.
2927 */
2928static atomic_long_t calc_load_tasks_idle;
2929
2930static void calc_load_account_idle(struct rq *this_rq)
2931{
2932 long delta;
2933
2934 delta = calc_load_fold_active(this_rq);
2935 if (delta)
2936 atomic_long_add(delta, &calc_load_tasks_idle);
2937}
2938
2939static long calc_load_fold_idle(void)
2940{
2941 long delta = 0;
2942
2943 /*
2944 * Its got a race, we don't care...
2945 */
2946 if (atomic_long_read(&calc_load_tasks_idle))
2947 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
2948
2949 return delta;
2950}
2951#else
2952static void calc_load_account_idle(struct rq *this_rq)
2953{
2954}
2955
2956static inline long calc_load_fold_idle(void)
2957{
2958 return 0;
2959}
2960#endif
2961
2985/** 2962/**
2986 * get_avenrun - get the load average array 2963 * get_avenrun - get the load average array
2987 * @loads: pointer to dest load array 2964 * @loads: pointer to dest load array
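The new helpers split load-average accounting in two: calc_load_fold_active() reports how much this runqueue's contribution (nr_running + nr_uninterruptible) has changed since it was last folded, and under NO_HZ a CPU entering idle parks that delta in calc_load_tasks_idle via calc_load_account_idle(), so a CPU that later reaches its LOAD_FREQ boundary can drain it with calc_load_fold_idle() and add everything to the global calc_load_tasks (see the reworked calc_load_account_active() in the next hunk). A small worked trace of the arithmetic, with made-up task counts:

    /* CPU 1 last folded a contribution of 2, now has 3 running + 1 uninterruptible */
    delta = (3 + 1) - 2;                              /* +2 */
    /* CPU 1 goes idle before its LOAD_FREQ boundary */
    atomic_long_add(delta, &calc_load_tasks_idle);    /* idle bucket now holds +2 */

    /* later, CPU 0 hits its calc_load_update boundary */
    delta  = calc_load_fold_active(this_rq);          /* its own change, say +1 */
    delta += calc_load_fold_idle();                   /* drains the +2 parked by CPU 1 */
    atomic_long_add(delta, &calc_load_tasks);         /* global count moves by +3 */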
@@ -3028,20 +3005,22 @@ void calc_global_load(void)
3028} 3005}
3029 3006
3030/* 3007/*
3031 * Either called from update_cpu_load() or from a cpu going idle 3008 * Called from update_cpu_load() to periodically update this CPU's
3009 * active count.
3032 */ 3010 */
3033static void calc_load_account_active(struct rq *this_rq) 3011static void calc_load_account_active(struct rq *this_rq)
3034{ 3012{
3035 long nr_active, delta; 3013 long delta;
3036 3014
3037 nr_active = this_rq->nr_running; 3015 if (time_before(jiffies, this_rq->calc_load_update))
3038 nr_active += (long) this_rq->nr_uninterruptible; 3016 return;
3039 3017
3040 if (nr_active != this_rq->calc_load_active) { 3018 delta = calc_load_fold_active(this_rq);
3041 delta = nr_active - this_rq->calc_load_active; 3019 delta += calc_load_fold_idle();
3042 this_rq->calc_load_active = nr_active; 3020 if (delta)
3043 atomic_long_add(delta, &calc_load_tasks); 3021 atomic_long_add(delta, &calc_load_tasks);
3044 } 3022
3023 this_rq->calc_load_update += LOAD_FREQ;
3045} 3024}
3046 3025
3047/* 3026/*
@@ -3073,10 +3052,7 @@ static void update_cpu_load(struct rq *this_rq)
3073 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; 3052 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
3074 } 3053 }
3075 3054
3076 if (time_after_eq(jiffies, this_rq->calc_load_update)) { 3055 calc_load_account_active(this_rq);
3077 this_rq->calc_load_update += LOAD_FREQ;
3078 calc_load_account_active(this_rq);
3079 }
3080} 3056}
3081 3057
3082#ifdef CONFIG_SMP 3058#ifdef CONFIG_SMP
@@ -3088,44 +3064,27 @@ static void update_cpu_load(struct rq *this_rq)
3088void sched_exec(void) 3064void sched_exec(void)
3089{ 3065{
3090 struct task_struct *p = current; 3066 struct task_struct *p = current;
3091 struct migration_req req;
3092 int dest_cpu, this_cpu;
3093 unsigned long flags; 3067 unsigned long flags;
3094 struct rq *rq; 3068 struct rq *rq;
3095 3069 int dest_cpu;
3096again:
3097 this_cpu = get_cpu();
3098 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3099 if (dest_cpu == this_cpu) {
3100 put_cpu();
3101 return;
3102 }
3103 3070
3104 rq = task_rq_lock(p, &flags); 3071 rq = task_rq_lock(p, &flags);
3105 put_cpu(); 3072 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3073 if (dest_cpu == smp_processor_id())
3074 goto unlock;
3106 3075
3107 /* 3076 /*
3108 * select_task_rq() can race against ->cpus_allowed 3077 * select_task_rq() can race against ->cpus_allowed
3109 */ 3078 */
3110 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3079 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
3111 || unlikely(!cpu_active(dest_cpu))) { 3080 likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
3112 task_rq_unlock(rq, &flags); 3081 struct migration_arg arg = { p, dest_cpu };
3113 goto again;
3114 }
3115 3082
3116 /* force the process onto the specified CPU */
3117 if (migrate_task(p, dest_cpu, &req)) {
3118 /* Need to wait for migration thread (might exit: take ref). */
3119 struct task_struct *mt = rq->migration_thread;
3120
3121 get_task_struct(mt);
3122 task_rq_unlock(rq, &flags); 3083 task_rq_unlock(rq, &flags);
3123 wake_up_process(mt); 3084 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
3124 put_task_struct(mt);
3125 wait_for_completion(&req.done);
3126
3127 return; 3085 return;
3128 } 3086 }
3087unlock:
3129 task_rq_unlock(rq, &flags); 3088 task_rq_unlock(rq, &flags);
3130} 3089}
3131 3090
@@ -3597,23 +3556,9 @@ static inline void schedule_debug(struct task_struct *prev)
3597 3556
3598static void put_prev_task(struct rq *rq, struct task_struct *prev) 3557static void put_prev_task(struct rq *rq, struct task_struct *prev)
3599{ 3558{
3600 if (prev->state == TASK_RUNNING) { 3559 if (prev->se.on_rq)
3601 u64 runtime = prev->se.sum_exec_runtime; 3560 update_rq_clock(rq);
3602 3561 rq->skip_clock_update = 0;
3603 runtime -= prev->se.prev_sum_exec_runtime;
3604 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
3605
3606 /*
3607 * In order to avoid avg_overlap growing stale when we are
3608 * indeed overlapping and hence not getting put to sleep, grow
3609 * the avg_overlap on preemption.
3610 *
3611 * We use the average preemption runtime because that
3612 * correlates to the amount of cache footprint a task can
3613 * build up.
3614 */
3615 update_avg(&prev->se.avg_overlap, runtime);
3616 }
3617 prev->sched_class->put_prev_task(rq, prev); 3562 prev->sched_class->put_prev_task(rq, prev);
3618} 3563}
3619 3564
@@ -3676,14 +3621,13 @@ need_resched_nonpreemptible:
3676 hrtick_clear(rq); 3621 hrtick_clear(rq);
3677 3622
3678 raw_spin_lock_irq(&rq->lock); 3623 raw_spin_lock_irq(&rq->lock);
3679 update_rq_clock(rq);
3680 clear_tsk_need_resched(prev); 3624 clear_tsk_need_resched(prev);
3681 3625
3682 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 3626 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3683 if (unlikely(signal_pending_state(prev->state, prev))) 3627 if (unlikely(signal_pending_state(prev->state, prev)))
3684 prev->state = TASK_RUNNING; 3628 prev->state = TASK_RUNNING;
3685 else 3629 else
3686 deactivate_task(rq, prev, 1); 3630 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3687 switch_count = &prev->nvcsw; 3631 switch_count = &prev->nvcsw;
3688 } 3632 }
3689 3633
@@ -4006,8 +3950,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
4006 if (!x->done) { 3950 if (!x->done) {
4007 DECLARE_WAITQUEUE(wait, current); 3951 DECLARE_WAITQUEUE(wait, current);
4008 3952
4009 wait.flags |= WQ_FLAG_EXCLUSIVE; 3953 __add_wait_queue_tail_exclusive(&x->wait, &wait);
4010 __add_wait_queue_tail(&x->wait, &wait);
4011 do { 3954 do {
4012 if (signal_pending_state(state, current)) { 3955 if (signal_pending_state(state, current)) {
4013 timeout = -ERESTARTSYS; 3956 timeout = -ERESTARTSYS;
@@ -4233,7 +4176,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4233 BUG_ON(prio < 0 || prio > MAX_PRIO); 4176 BUG_ON(prio < 0 || prio > MAX_PRIO);
4234 4177
4235 rq = task_rq_lock(p, &flags); 4178 rq = task_rq_lock(p, &flags);
4236 update_rq_clock(rq);
4237 4179
4238 oldprio = p->prio; 4180 oldprio = p->prio;
4239 prev_class = p->sched_class; 4181 prev_class = p->sched_class;
@@ -4254,7 +4196,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4254 if (running) 4196 if (running)
4255 p->sched_class->set_curr_task(rq); 4197 p->sched_class->set_curr_task(rq);
4256 if (on_rq) { 4198 if (on_rq) {
4257 enqueue_task(rq, p, 0, oldprio < prio); 4199 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4258 4200
4259 check_class_changed(rq, p, prev_class, oldprio, running); 4201 check_class_changed(rq, p, prev_class, oldprio, running);
4260 } 4202 }
@@ -4276,7 +4218,6 @@ void set_user_nice(struct task_struct *p, long nice)
4276 * the task might be in the middle of scheduling on another CPU. 4218 * the task might be in the middle of scheduling on another CPU.
4277 */ 4219 */
4278 rq = task_rq_lock(p, &flags); 4220 rq = task_rq_lock(p, &flags);
4279 update_rq_clock(rq);
4280 /* 4221 /*
4281 * The RT priorities are set via sched_setscheduler(), but we still 4222 * The RT priorities are set via sched_setscheduler(), but we still
4282 * allow the 'normal' nice value to be set - but as expected 4223 * allow the 'normal' nice value to be set - but as expected
@@ -4298,7 +4239,7 @@ void set_user_nice(struct task_struct *p, long nice)
4298 delta = p->prio - old_prio; 4239 delta = p->prio - old_prio;
4299 4240
4300 if (on_rq) { 4241 if (on_rq) {
4301 enqueue_task(rq, p, 0, false); 4242 enqueue_task(rq, p, 0);
4302 /* 4243 /*
4303 * If the task increased its priority or is running and 4244 * If the task increased its priority or is running and
4304 * lowered its priority, then reschedule its CPU: 4245 * lowered its priority, then reschedule its CPU:
@@ -4559,7 +4500,6 @@ recheck:
4559 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4500 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4560 goto recheck; 4501 goto recheck;
4561 } 4502 }
4562 update_rq_clock(rq);
4563 on_rq = p->se.on_rq; 4503 on_rq = p->se.on_rq;
4564 running = task_current(rq, p); 4504 running = task_current(rq, p);
4565 if (on_rq) 4505 if (on_rq)
@@ -5296,17 +5236,15 @@ static inline void sched_init_granularity(void)
5296/* 5236/*
5297 * This is how migration works: 5237 * This is how migration works:
5298 * 5238 *
5299 * 1) we queue a struct migration_req structure in the source CPU's 5239 * 1) we invoke migration_cpu_stop() on the target CPU using
5300 * runqueue and wake up that CPU's migration thread. 5240 * stop_one_cpu().
5301 * 2) we down() the locked semaphore => thread blocks. 5241 * 2) stopper starts to run (implicitly forcing the migrated thread
5302 * 3) migration thread wakes up (implicitly it forces the migrated 5242 * off the CPU)
5303 * thread off the CPU) 5243 * 3) it checks whether the migrated task is still in the wrong runqueue.
5304 * 4) it gets the migration request and checks whether the migrated 5244 * 4) if it's in the wrong runqueue then the migration thread removes
5305 * task is still in the wrong runqueue.
5306 * 5) if it's in the wrong runqueue then the migration thread removes
5307 * it and puts it into the right queue. 5245 * it and puts it into the right queue.
5308 * 6) migration thread up()s the semaphore. 5246 * 5) stopper completes and stop_one_cpu() returns and the migration
5309 * 7) we wake up and the migration is done. 5247 * is done.
5310 */ 5248 */
5311 5249
5312/* 5250/*
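The numbered list above summarises the change of machinery: instead of queueing a struct migration_req on a per-CPU migration kthread and sleeping on a completion, callers now fill in a struct migration_arg on the stack and hand it to stop_one_cpu(), which runs migration_cpu_stop() on the source CPU's stopper thread and returns only once it has finished. The calling pattern, as it appears in the set_cpus_allowed_ptr() hunk below:

    struct migration_arg arg = { p, dest_cpu };

    /* drop the rq lock and let the stopper do the actual move */
    task_rq_unlock(rq, &flags);
    stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);   /* synchronous */
    tlb_migrate_finish(p->mm);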
@@ -5320,12 +5258,23 @@ static inline void sched_init_granularity(void)
5320 */ 5258 */
5321int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 5259int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5322{ 5260{
5323 struct migration_req req;
5324 unsigned long flags; 5261 unsigned long flags;
5325 struct rq *rq; 5262 struct rq *rq;
5263 unsigned int dest_cpu;
5326 int ret = 0; 5264 int ret = 0;
5327 5265
5266 /*
5267 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5268 * drop the rq->lock and still rely on ->cpus_allowed.
5269 */
5270again:
5271 while (task_is_waking(p))
5272 cpu_relax();
5328 rq = task_rq_lock(p, &flags); 5273 rq = task_rq_lock(p, &flags);
5274 if (task_is_waking(p)) {
5275 task_rq_unlock(rq, &flags);
5276 goto again;
5277 }
5329 5278
5330 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 5279 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5331 ret = -EINVAL; 5280 ret = -EINVAL;
@@ -5349,15 +5298,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5349 if (cpumask_test_cpu(task_cpu(p), new_mask)) 5298 if (cpumask_test_cpu(task_cpu(p), new_mask))
5350 goto out; 5299 goto out;
5351 5300
5352 if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { 5301 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5302 if (migrate_task(p, dest_cpu)) {
5303 struct migration_arg arg = { p, dest_cpu };
5353 /* Need help from migration thread: drop lock and wait. */ 5304 /* Need help from migration thread: drop lock and wait. */
5354 struct task_struct *mt = rq->migration_thread;
5355
5356 get_task_struct(mt);
5357 task_rq_unlock(rq, &flags); 5305 task_rq_unlock(rq, &flags);
5358 wake_up_process(mt); 5306 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5359 put_task_struct(mt);
5360 wait_for_completion(&req.done);
5361 tlb_migrate_finish(p->mm); 5307 tlb_migrate_finish(p->mm);
5362 return 0; 5308 return 0;
5363 } 5309 }
@@ -5415,98 +5361,49 @@ fail:
5415 return ret; 5361 return ret;
5416} 5362}
5417 5363
5418#define RCU_MIGRATION_IDLE 0
5419#define RCU_MIGRATION_NEED_QS 1
5420#define RCU_MIGRATION_GOT_QS 2
5421#define RCU_MIGRATION_MUST_SYNC 3
5422
5423/* 5364/*
5424 * migration_thread - this is a highprio system thread that performs 5365 * migration_cpu_stop - this will be executed by a highprio stopper thread
5425 * thread migration by bumping thread off CPU then 'pushing' onto 5366 * and performs thread migration by bumping thread off CPU then
5426 * another runqueue. 5367 * 'pushing' onto another runqueue.
5427 */ 5368 */
5428static int migration_thread(void *data) 5369static int migration_cpu_stop(void *data)
5429{ 5370{
5430 int badcpu; 5371 struct migration_arg *arg = data;
5431 int cpu = (long)data;
5432 struct rq *rq;
5433
5434 rq = cpu_rq(cpu);
5435 BUG_ON(rq->migration_thread != current);
5436
5437 set_current_state(TASK_INTERRUPTIBLE);
5438 while (!kthread_should_stop()) {
5439 struct migration_req *req;
5440 struct list_head *head;
5441
5442 raw_spin_lock_irq(&rq->lock);
5443
5444 if (cpu_is_offline(cpu)) {
5445 raw_spin_unlock_irq(&rq->lock);
5446 break;
5447 }
5448
5449 if (rq->active_balance) {
5450 active_load_balance(rq, cpu);
5451 rq->active_balance = 0;
5452 }
5453
5454 head = &rq->migration_queue;
5455
5456 if (list_empty(head)) {
5457 raw_spin_unlock_irq(&rq->lock);
5458 schedule();
5459 set_current_state(TASK_INTERRUPTIBLE);
5460 continue;
5461 }
5462 req = list_entry(head->next, struct migration_req, list);
5463 list_del_init(head->next);
5464
5465 if (req->task != NULL) {
5466 raw_spin_unlock(&rq->lock);
5467 __migrate_task(req->task, cpu, req->dest_cpu);
5468 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
5469 req->dest_cpu = RCU_MIGRATION_GOT_QS;
5470 raw_spin_unlock(&rq->lock);
5471 } else {
5472 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
5473 raw_spin_unlock(&rq->lock);
5474 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
5475 }
5476 local_irq_enable();
5477
5478 complete(&req->done);
5479 }
5480 __set_current_state(TASK_RUNNING);
5481
5482 return 0;
5483}
5484
5485#ifdef CONFIG_HOTPLUG_CPU
5486
5487static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
5488{
5489 int ret;
5490 5372
5373 /*
5374 * The original target cpu might have gone down and we might
5375 * be on another cpu but it doesn't matter.
5376 */
5491 local_irq_disable(); 5377 local_irq_disable();
5492 ret = __migrate_task(p, src_cpu, dest_cpu); 5378 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5493 local_irq_enable(); 5379 local_irq_enable();
5494 return ret; 5380 return 0;
5495} 5381}
5496 5382
5383#ifdef CONFIG_HOTPLUG_CPU
5497/* 5384/*
5498 * Figure out where task on dead CPU should go, use force if necessary. 5385 * Figure out where task on dead CPU should go, use force if necessary.
5499 */ 5386 */
5500static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 5387void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5501{ 5388{
5502 int dest_cpu; 5389 struct rq *rq = cpu_rq(dead_cpu);
5390 int needs_cpu, uninitialized_var(dest_cpu);
5391 unsigned long flags;
5503 5392
5504again: 5393 local_irq_save(flags);
5505 dest_cpu = select_fallback_rq(dead_cpu, p);
5506 5394
5507 /* It can have affinity changed while we were choosing. */ 5395 raw_spin_lock(&rq->lock);
5508 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 5396 needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
5509 goto again; 5397 if (needs_cpu)
5398 dest_cpu = select_fallback_rq(dead_cpu, p);
5399 raw_spin_unlock(&rq->lock);
5400 /*
5401 * It can only fail if we race with set_cpus_allowed(),
5402 * in the racer should migrate the task anyway.
5403 */
5404 if (needs_cpu)
5405 __migrate_task(p, dead_cpu, dest_cpu);
5406 local_irq_restore(flags);
5510} 5407}
5511 5408
5512/* 5409/*
@@ -5570,7 +5467,6 @@ void sched_idle_next(void)
5570 5467
5571 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 5468 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5572 5469
5573 update_rq_clock(rq);
5574 activate_task(rq, p, 0); 5470 activate_task(rq, p, 0);
5575 5471
5576 raw_spin_unlock_irqrestore(&rq->lock, flags); 5472 raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5625,7 +5521,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
5625 for ( ; ; ) { 5521 for ( ; ; ) {
5626 if (!rq->nr_running) 5522 if (!rq->nr_running)
5627 break; 5523 break;
5628 update_rq_clock(rq);
5629 next = pick_next_task(rq); 5524 next = pick_next_task(rq);
5630 if (!next) 5525 if (!next)
5631 break; 5526 break;
@@ -5848,35 +5743,20 @@ static void set_rq_offline(struct rq *rq)
5848static int __cpuinit 5743static int __cpuinit
5849migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5744migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5850{ 5745{
5851 struct task_struct *p;
5852 int cpu = (long)hcpu; 5746 int cpu = (long)hcpu;
5853 unsigned long flags; 5747 unsigned long flags;
5854 struct rq *rq; 5748 struct rq *rq = cpu_rq(cpu);
5855 5749
5856 switch (action) { 5750 switch (action) {
5857 5751
5858 case CPU_UP_PREPARE: 5752 case CPU_UP_PREPARE:
5859 case CPU_UP_PREPARE_FROZEN: 5753 case CPU_UP_PREPARE_FROZEN:
5860 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
5861 if (IS_ERR(p))
5862 return NOTIFY_BAD;
5863 kthread_bind(p, cpu);
5864 /* Must be high prio: stop_machine expects to yield to it. */
5865 rq = task_rq_lock(p, &flags);
5866 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5867 task_rq_unlock(rq, &flags);
5868 get_task_struct(p);
5869 cpu_rq(cpu)->migration_thread = p;
5870 rq->calc_load_update = calc_load_update; 5754 rq->calc_load_update = calc_load_update;
5871 break; 5755 break;
5872 5756
5873 case CPU_ONLINE: 5757 case CPU_ONLINE:
5874 case CPU_ONLINE_FROZEN: 5758 case CPU_ONLINE_FROZEN:
5875 /* Strictly unnecessary, as first user will wake it. */
5876 wake_up_process(cpu_rq(cpu)->migration_thread);
5877
5878 /* Update our root-domain */ 5759 /* Update our root-domain */
5879 rq = cpu_rq(cpu);
5880 raw_spin_lock_irqsave(&rq->lock, flags); 5760 raw_spin_lock_irqsave(&rq->lock, flags);
5881 if (rq->rd) { 5761 if (rq->rd) {
5882 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5762 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -5887,61 +5767,24 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5887 break; 5767 break;
5888 5768
5889#ifdef CONFIG_HOTPLUG_CPU 5769#ifdef CONFIG_HOTPLUG_CPU
5890 case CPU_UP_CANCELED:
5891 case CPU_UP_CANCELED_FROZEN:
5892 if (!cpu_rq(cpu)->migration_thread)
5893 break;
5894 /* Unbind it from offline cpu so it can run. Fall thru. */
5895 kthread_bind(cpu_rq(cpu)->migration_thread,
5896 cpumask_any(cpu_online_mask));
5897 kthread_stop(cpu_rq(cpu)->migration_thread);
5898 put_task_struct(cpu_rq(cpu)->migration_thread);
5899 cpu_rq(cpu)->migration_thread = NULL;
5900 break;
5901
5902 case CPU_DEAD: 5770 case CPU_DEAD:
5903 case CPU_DEAD_FROZEN: 5771 case CPU_DEAD_FROZEN:
5904 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
5905 migrate_live_tasks(cpu); 5772 migrate_live_tasks(cpu);
5906 rq = cpu_rq(cpu);
5907 kthread_stop(rq->migration_thread);
5908 put_task_struct(rq->migration_thread);
5909 rq->migration_thread = NULL;
5910 /* Idle task back to normal (off runqueue, low prio) */ 5773 /* Idle task back to normal (off runqueue, low prio) */
5911 raw_spin_lock_irq(&rq->lock); 5774 raw_spin_lock_irq(&rq->lock);
5912 update_rq_clock(rq);
5913 deactivate_task(rq, rq->idle, 0); 5775 deactivate_task(rq, rq->idle, 0);
5914 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 5776 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5915 rq->idle->sched_class = &idle_sched_class; 5777 rq->idle->sched_class = &idle_sched_class;
5916 migrate_dead_tasks(cpu); 5778 migrate_dead_tasks(cpu);
5917 raw_spin_unlock_irq(&rq->lock); 5779 raw_spin_unlock_irq(&rq->lock);
5918 cpuset_unlock();
5919 migrate_nr_uninterruptible(rq); 5780 migrate_nr_uninterruptible(rq);
5920 BUG_ON(rq->nr_running != 0); 5781 BUG_ON(rq->nr_running != 0);
5921 calc_global_load_remove(rq); 5782 calc_global_load_remove(rq);
5922 /*
5923 * No need to migrate the tasks: it was best-effort if
5924 * they didn't take sched_hotcpu_mutex. Just wake up
5925 * the requestors.
5926 */
5927 raw_spin_lock_irq(&rq->lock);
5928 while (!list_empty(&rq->migration_queue)) {
5929 struct migration_req *req;
5930
5931 req = list_entry(rq->migration_queue.next,
5932 struct migration_req, list);
5933 list_del_init(&req->list);
5934 raw_spin_unlock_irq(&rq->lock);
5935 complete(&req->done);
5936 raw_spin_lock_irq(&rq->lock);
5937 }
5938 raw_spin_unlock_irq(&rq->lock);
5939 break; 5783 break;
5940 5784
5941 case CPU_DYING: 5785 case CPU_DYING:
5942 case CPU_DYING_FROZEN: 5786 case CPU_DYING_FROZEN:
5943 /* Update our root-domain */ 5787 /* Update our root-domain */
5944 rq = cpu_rq(cpu);
5945 raw_spin_lock_irqsave(&rq->lock, flags); 5788 raw_spin_lock_irqsave(&rq->lock, flags);
5946 if (rq->rd) { 5789 if (rq->rd) {
5947 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5790 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -6272,6 +6115,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6272 struct rq *rq = cpu_rq(cpu); 6115 struct rq *rq = cpu_rq(cpu);
6273 struct sched_domain *tmp; 6116 struct sched_domain *tmp;
6274 6117
6118 for (tmp = sd; tmp; tmp = tmp->parent)
6119 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6120
6275 /* Remove the sched domains which do not contribute to scheduling. */ 6121 /* Remove the sched domains which do not contribute to scheduling. */
6276 for (tmp = sd; tmp; ) { 6122 for (tmp = sd; tmp; ) {
6277 struct sched_domain *parent = tmp->parent; 6123 struct sched_domain *parent = tmp->parent;
@@ -7755,10 +7601,8 @@ void __init sched_init(void)
7755 rq->push_cpu = 0; 7601 rq->push_cpu = 0;
7756 rq->cpu = i; 7602 rq->cpu = i;
7757 rq->online = 0; 7603 rq->online = 0;
7758 rq->migration_thread = NULL;
7759 rq->idle_stamp = 0; 7604 rq->idle_stamp = 0;
7760 rq->avg_idle = 2*sysctl_sched_migration_cost; 7605 rq->avg_idle = 2*sysctl_sched_migration_cost;
7761 INIT_LIST_HEAD(&rq->migration_queue);
7762 rq_attach_root(rq, &def_root_domain); 7606 rq_attach_root(rq, &def_root_domain);
7763#endif 7607#endif
7764 init_rq_hrtick(rq); 7608 init_rq_hrtick(rq);
@@ -7859,7 +7703,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
7859{ 7703{
7860 int on_rq; 7704 int on_rq;
7861 7705
7862 update_rq_clock(rq);
7863 on_rq = p->se.on_rq; 7706 on_rq = p->se.on_rq;
7864 if (on_rq) 7707 if (on_rq)
7865 deactivate_task(rq, p, 0); 7708 deactivate_task(rq, p, 0);
@@ -7886,9 +7729,9 @@ void normalize_rt_tasks(void)
7886 7729
7887 p->se.exec_start = 0; 7730 p->se.exec_start = 0;
7888#ifdef CONFIG_SCHEDSTATS 7731#ifdef CONFIG_SCHEDSTATS
7889 p->se.wait_start = 0; 7732 p->se.statistics.wait_start = 0;
7890 p->se.sleep_start = 0; 7733 p->se.statistics.sleep_start = 0;
7891 p->se.block_start = 0; 7734 p->se.statistics.block_start = 0;
7892#endif 7735#endif
7893 7736
7894 if (!rt_task(p)) { 7737 if (!rt_task(p)) {
@@ -8221,8 +8064,6 @@ void sched_move_task(struct task_struct *tsk)
8221 8064
8222 rq = task_rq_lock(tsk, &flags); 8065 rq = task_rq_lock(tsk, &flags);
8223 8066
8224 update_rq_clock(rq);
8225
8226 running = task_current(rq, tsk); 8067 running = task_current(rq, tsk);
8227 on_rq = tsk->se.on_rq; 8068 on_rq = tsk->se.on_rq;
8228 8069
@@ -8241,7 +8082,7 @@ void sched_move_task(struct task_struct *tsk)
8241 if (unlikely(running)) 8082 if (unlikely(running))
8242 tsk->sched_class->set_curr_task(rq); 8083 tsk->sched_class->set_curr_task(rq);
8243 if (on_rq) 8084 if (on_rq)
8244 enqueue_task(rq, tsk, 0, false); 8085 enqueue_task(rq, tsk, 0);
8245 8086
8246 task_rq_unlock(rq, &flags); 8087 task_rq_unlock(rq, &flags);
8247} 8088}
@@ -9055,43 +8896,32 @@ struct cgroup_subsys cpuacct_subsys = {
9055 8896
9056#ifndef CONFIG_SMP 8897#ifndef CONFIG_SMP
9057 8898
9058int rcu_expedited_torture_stats(char *page)
9059{
9060 return 0;
9061}
9062EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
9063
9064void synchronize_sched_expedited(void) 8899void synchronize_sched_expedited(void)
9065{ 8900{
8901 barrier();
9066} 8902}
9067EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 8903EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9068 8904
9069#else /* #ifndef CONFIG_SMP */ 8905#else /* #ifndef CONFIG_SMP */
9070 8906
9071static DEFINE_PER_CPU(struct migration_req, rcu_migration_req); 8907static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
9072static DEFINE_MUTEX(rcu_sched_expedited_mutex);
9073
9074#define RCU_EXPEDITED_STATE_POST -2
9075#define RCU_EXPEDITED_STATE_IDLE -1
9076
9077static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
9078 8908
9079int rcu_expedited_torture_stats(char *page) 8909static int synchronize_sched_expedited_cpu_stop(void *data)
9080{ 8910{
9081 int cnt = 0; 8911 /*
9082 int cpu; 8912 * There must be a full memory barrier on each affected CPU
9083 8913 * between the time that try_stop_cpus() is called and the
9084 cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state); 8914 * time that it returns.
9085 for_each_online_cpu(cpu) { 8915 *
9086 cnt += sprintf(&page[cnt], " %d:%d", 8916 * In the current initial implementation of cpu_stop, the
9087 cpu, per_cpu(rcu_migration_req, cpu).dest_cpu); 8917 * above condition is already met when the control reaches
9088 } 8918 * this point and the following smp_mb() is not strictly
9089 cnt += sprintf(&page[cnt], "\n"); 8919 * necessary. Do smp_mb() anyway for documentation and
9090 return cnt; 8920 * robustness against future implementation changes.
8921 */
8922 smp_mb(); /* See above comment block. */
8923 return 0;
9091} 8924}
9092EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
9093
9094static long synchronize_sched_expedited_count;
9095 8925
9096/* 8926/*
9097 * Wait for an rcu-sched grace period to elapse, but use "big hammer" 8927 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
@@ -9105,18 +8935,14 @@ static long synchronize_sched_expedited_count;
9105 */ 8935 */
9106void synchronize_sched_expedited(void) 8936void synchronize_sched_expedited(void)
9107{ 8937{
9108 int cpu; 8938 int snap, trycount = 0;
9109 unsigned long flags;
9110 bool need_full_sync = 0;
9111 struct rq *rq;
9112 struct migration_req *req;
9113 long snap;
9114 int trycount = 0;
9115 8939
9116 smp_mb(); /* ensure prior mod happens before capturing snap. */ 8940 smp_mb(); /* ensure prior mod happens before capturing snap. */
9117 snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1; 8941 snap = atomic_read(&synchronize_sched_expedited_count) + 1;
9118 get_online_cpus(); 8942 get_online_cpus();
9119 while (!mutex_trylock(&rcu_sched_expedited_mutex)) { 8943 while (try_stop_cpus(cpu_online_mask,
8944 synchronize_sched_expedited_cpu_stop,
8945 NULL) == -EAGAIN) {
9120 put_online_cpus(); 8946 put_online_cpus();
9121 if (trycount++ < 10) 8947 if (trycount++ < 10)
9122 udelay(trycount * num_online_cpus()); 8948 udelay(trycount * num_online_cpus());
@@ -9124,41 +8950,15 @@ void synchronize_sched_expedited(void)
9124 synchronize_sched(); 8950 synchronize_sched();
9125 return; 8951 return;
9126 } 8952 }
9127 if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) { 8953 if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
9128 smp_mb(); /* ensure test happens before caller kfree */ 8954 smp_mb(); /* ensure test happens before caller kfree */
9129 return; 8955 return;
9130 } 8956 }
9131 get_online_cpus(); 8957 get_online_cpus();
9132 } 8958 }
9133 rcu_expedited_state = RCU_EXPEDITED_STATE_POST; 8959 atomic_inc(&synchronize_sched_expedited_count);
9134 for_each_online_cpu(cpu) { 8960 smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
9135 rq = cpu_rq(cpu);
9136 req = &per_cpu(rcu_migration_req, cpu);
9137 init_completion(&req->done);
9138 req->task = NULL;
9139 req->dest_cpu = RCU_MIGRATION_NEED_QS;
9140 raw_spin_lock_irqsave(&rq->lock, flags);
9141 list_add(&req->list, &rq->migration_queue);
9142 raw_spin_unlock_irqrestore(&rq->lock, flags);
9143 wake_up_process(rq->migration_thread);
9144 }
9145 for_each_online_cpu(cpu) {
9146 rcu_expedited_state = cpu;
9147 req = &per_cpu(rcu_migration_req, cpu);
9148 rq = cpu_rq(cpu);
9149 wait_for_completion(&req->done);
9150 raw_spin_lock_irqsave(&rq->lock, flags);
9151 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
9152 need_full_sync = 1;
9153 req->dest_cpu = RCU_MIGRATION_IDLE;
9154 raw_spin_unlock_irqrestore(&rq->lock, flags);
9155 }
9156 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
9157 synchronize_sched_expedited_count++;
9158 mutex_unlock(&rcu_sched_expedited_mutex);
9159 put_online_cpus(); 8961 put_online_cpus();
9160 if (need_full_sync)
9161 synchronize_sched();
9162} 8962}
9163EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 8963EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9164 8964
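With the migration kthreads gone, synchronize_sched_expedited() can no longer queue dummy migration requests to force every CPU through its migration thread; it now uses try_stop_cpus() on cpu_online_mask. Getting the stopper to run on a CPU implies a context switch there, so once try_stop_cpus() succeeds every online CPU has passed a quiescent state and the expedited grace period is over. The snapshot of synchronize_sched_expedited_count lets a contended caller return early: if somebody else completed an expedited grace period after the snapshot was taken, that grace period covers this caller too. A condensed paraphrase of the loop above (memory barriers omitted):

    snap = atomic_read(&synchronize_sched_expedited_count) + 1;
    get_online_cpus();
    while (try_stop_cpus(cpu_online_mask,
                         synchronize_sched_expedited_cpu_stop, NULL) == -EAGAIN) {
        put_online_cpus();
        if (trycount++ >= 10) {
            synchronize_sched();        /* too contended: fall back to a normal GP */
            return;
        }
        udelay(trycount * num_online_cpus());
        if (atomic_read(&synchronize_sched_expedited_count) - snap > 0)
            return;                     /* someone else's expedited GP covers us */
        get_online_cpus();
    }
    atomic_inc(&synchronize_sched_expedited_count);
    put_online_cpus();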