Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  339
1 file changed, 124 insertions(+), 215 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de70..ab562ae4007c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,8 +493,11 @@ struct rq {
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
+	u64 nohz_stamp;
 	unsigned char in_nohz_recently;
 #endif
+	unsigned int skip_clock_update;
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -592,6 +595,13 @@ static inline
 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
 	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+
+	/*
+	 * A queue event has occurred, and we're going to schedule. In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(p))
+		rq->skip_clock_update = 1;
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -626,7 +636,8 @@ static inline int cpu_of(struct rq *rq)
 
 inline void update_rq_clock(struct rq *rq)
 {
-	rq->clock = sched_clock_cpu(cpu_of(rq));
+	if (!rq->skip_clock_update)
+		rq->clock = sched_clock_cpu(cpu_of(rq));
 }
 
 /*
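The three hunks above form one optimisation: enqueue_task() (further down in this patch) now updates the rq clock itself, and when check_preempt_curr() sees that the queue event already left a reschedule pending, the flag lets the otherwise back-to-back clock update on the schedule() path be elided; put_prev_task() (also below) clears the flag again. The following is a compilable userspace sketch of that flow, not kernel code — the struct and function names mirror the patch, the clock source and everything else is simplified or assumed.

/* Userspace model of the skip_clock_update flow introduced above;
 * names mirror the patch, the clock source is simplified. */
#include <stdio.h>
#include <time.h>

struct rq {
	unsigned long long clock;	/* rq->clock */
	unsigned int skip_clock_update;
};

static unsigned long long sched_clock_cpu_model(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* mirrors the patched update_rq_clock(): reads are elided while set */
static void update_rq_clock_model(struct rq *rq)
{
	if (!rq->skip_clock_update)
		rq->clock = sched_clock_cpu_model();
}

int main(void)
{
	struct rq rq = { 0, 0 };

	update_rq_clock_model(&rq);	/* enqueue path: clock updated */
	rq.skip_clock_update = 1;	/* check_preempt_curr(): resched pending */
	update_rq_clock_model(&rq);	/* back-to-back update skipped */
	rq.skip_clock_update = 0;	/* put_prev_task() re-arms it */
	update_rq_clock_model(&rq);
	printf("clock = %llu ns\n", rq.clock);
	return 0;
}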
@@ -904,16 +915,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
- *
- * We need to make an exception for PF_STARTING tasks because the fork
- * path might require task_rq_lock() to work, eg. it can call
- * set_cpus_allowed_ptr() from the cpuset clone_ns code.
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
  */
 static inline int task_is_waking(struct task_struct *p)
 {
-	return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
+	return unlikely(p->state == TASK_WAKING);
 }
 
 /*
@@ -926,11 +933,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
 	}
@@ -947,12 +952,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}
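With the fork path no longer parking the child in TASK_WAKING (see the sched_fork() hunk below), the waking checks come out of the rq lock helpers and __task_rq_lock()/task_rq_lock() revert to the plain lock-and-revalidate loop; the one caller that still has to serialize against TASK_WAKING, set_cpus_allowed_ptr(), now open-codes the wait (near the end of this page). A minimal pthreads model of the surviving pattern, all names illustrative:

/* Model of the lock-and-revalidate loop that __task_rq_lock()
 * reduces to after this change; all names illustrative. */
#include <pthread.h>

struct rq { pthread_mutex_t lock; };
struct task { struct rq *rq; };		/* task_rq(p) == p->rq */

static struct rq *task_rq_lock_model(struct task *p)
{
	for (;;) {
		struct rq *rq = p->rq;		/* racy read */
		pthread_mutex_lock(&rq->lock);
		if (rq == p->rq)		/* still the task's rq? */
			return rq;		/* return with lock held */
		pthread_mutex_unlock(&rq->lock);/* migrated: retry */
	}
}

int main(void)
{
	struct rq rq = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { &rq };

	pthread_mutex_unlock(&task_rq_lock_model(&t)->lock);
	return 0;
}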
@@ -1229,6 +1232,17 @@ void wake_up_idle_cpu(int cpu)
 	if (!tsk_is_polling(rq->idle))
 		smp_send_reschedule(cpu);
 }
+
+int nohz_ratelimit(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 diff = rq->clock - rq->nohz_stamp;
+
+	rq->nohz_stamp = rq->clock;
+
+	return diff < (NSEC_PER_SEC / HZ) >> 1;
+}
+
 #endif /* CONFIG_NO_HZ */
 
 static u64 sched_avg_period(void)
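nohz_ratelimit() reports whether less than half a scheduler tick has elapsed since it was last consulted on this cpu, so a caller can decline to enter nohz mode when the tick would be restarted that quickly anyway. Note the precedence: ">>" binds tighter than "<", so the comparison is against (NSEC_PER_SEC / HZ) / 2, i.e. 500,000 ns at HZ=1000. A standalone check of the arithmetic (the HZ value is an assumption here; in the kernel it is a build-time configuration):

/* Standalone check of the nohz_ratelimit() threshold; HZ=1000 is an
 * assumption, in the kernel it is a build-time configuration. */
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ull
#define HZ		1000

int main(void)
{
	/* ">>" binds tighter than "<", so the kernel expression reads
	 * diff < ((NSEC_PER_SEC / HZ) >> 1) -- half a tick. */
	unsigned long long threshold = (NSEC_PER_SEC / HZ) >> 1;

	printf("half a tick at HZ=%d: %llu ns\n", HZ, threshold);
	return 0;			/* prints 500000 */
}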
@@ -1771,8 +1785,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
-	update_rq_clock(rq1);
-	update_rq_clock(rq2);
 }
 
 /*
@@ -1866,56 +1878,43 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
-	if (wakeup)
-		p->se.start_runtime = p->se.sum_exec_runtime;
-
+	update_rq_clock(rq);
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup, head);
+	p->sched_class->enqueue_task(rq, p, flags);
 	p->se.on_rq = 1;
 }
 
-static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
-	if (sleep) {
-		if (p->se.last_wakeup) {
-			update_avg(&p->se.avg_overlap,
-				p->se.sum_exec_runtime - p->se.last_wakeup);
-			p->se.last_wakeup = 0;
-		} else {
-			update_avg(&p->se.avg_wakeup,
-				sysctl_sched_wakeup_granularity);
-		}
-	}
-
+	update_rq_clock(rq);
 	sched_info_dequeued(p);
-	p->sched_class->dequeue_task(rq, p, sleep);
+	p->sched_class->dequeue_task(rq, p, flags);
 	p->se.on_rq = 0;
 }
 
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup, false);
+	enqueue_task(rq, p, flags);
 	inc_nr_running(rq);
 }
 
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
-	dequeue_task(rq, p, sleep);
+	dequeue_task(rq, p, flags);
 	dec_nr_running(rq);
 }
 
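From here on, the (wakeup, head) and sleep parameters of the queueing functions become a single flags word, and update_rq_clock() moves inside enqueue_task()/dequeue_task() themselves — which is what allows the many explicit update_rq_clock() calls deleted in the later hunks. The flag values below mirror the sched.h definitions that accompany this series (ENQUEUE_HEAD shows up in the rt_mutex_setprio() hunk, DEQUEUE_SLEEP in the schedule() hunk); treat them as illustrative rather than authoritative:

/* Flag words replacing the old bool parameters; values as in the
 * sched.h side of this series -- illustrative, not authoritative. */
#define ENQUEUE_WAKEUP	1	/* task is waking up (old wakeup=1) */
#define ENQUEUE_WAKING	2	/* ->task_waking() already ran */
#define ENQUEUE_HEAD	4	/* queue at the head (old head=true) */

#define DEQUEUE_SLEEP	1	/* task is going to sleep (old sleep=1) */

/* e.g.: activate_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
 *       deactivate_task(rq, prev, DEQUEUE_SLEEP); */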
@@ -2273,6 +2272,9 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+/*
+ * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
 	int dest_cpu;
@@ -2289,12 +2291,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		return dest_cpu;
 
 	/* No more Mr. Nice Guy. */
-	if (dest_cpu >= nr_cpu_ids) {
-		rcu_read_lock();
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		rcu_read_unlock();
-		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
+	if (unlikely(dest_cpu >= nr_cpu_ids)) {
+		dest_cpu = cpuset_cpus_allowed_fallback(p);
 		/*
 		 * Don't tell them about moving exiting tasks or
 		 * kernel threads (both mm NULL), since they never
@@ -2311,17 +2309,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Gets called from 3 sites (exec, fork, wakeup), since it is called without
- * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
- * by:
- *
- *  exec:           is unstable, retry loop
- *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
+ * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -2360,16 +2353,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
+	unsigned long en_flags = ENQUEUE_WAKEUP;
 	struct rq *rq;
 
-	if (!sched_feat(SYNC_WAKEUPS))
-		wake_flags &= ~WF_SYNC;
-
 	this_cpu = get_cpu();
 
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
 
@@ -2389,28 +2379,26 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	 *
 	 * First fix up the nr_uninterruptible count:
 	 */
-	if (task_contributes_to_load(p))
-		rq->nr_uninterruptible--;
+	if (task_contributes_to_load(p)) {
+		if (likely(cpu_online(orig_cpu)))
+			rq->nr_uninterruptible--;
+		else
+			this_rq()->nr_uninterruptible--;
+	}
 	p->state = TASK_WAKING;
 
-	if (p->sched_class->task_waking)
+	if (p->sched_class->task_waking) {
 		p->sched_class->task_waking(rq, p);
+		en_flags |= ENQUEUE_WAKING;
+	}
 
-	__task_rq_unlock(rq);
-
-	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu) {
-		/*
-		 * Since we migrate the task without holding any rq->lock,
-		 * we need to be careful with task_rq_lock(), since that
-		 * might end up locking an invalid rq.
-		 */
+	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
-	}
+	__task_rq_unlock(rq);
 
 	rq = cpu_rq(cpu);
 	raw_spin_lock(&rq->lock);
-	update_rq_clock(rq);
 
 	/*
 	 * We migrated the task without holding either rq->lock, however
@@ -2438,34 +2426,18 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-	schedstat_inc(p, se.nr_wakeups);
+	schedstat_inc(p, se.statistics.nr_wakeups);
 	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.nr_wakeups_sync);
+		schedstat_inc(p, se.statistics.nr_wakeups_sync);
 	if (orig_cpu != cpu)
-		schedstat_inc(p, se.nr_wakeups_migrate);
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
 	if (cpu == this_cpu)
-		schedstat_inc(p, se.nr_wakeups_local);
+		schedstat_inc(p, se.statistics.nr_wakeups_local);
 	else
-		schedstat_inc(p, se.nr_wakeups_remote);
-	activate_task(rq, p, 1);
+		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+	activate_task(rq, p, en_flags);
 	success = 1;
 
-	/*
-	 * Only attribute actual wakeups done by this task.
-	 */
-	if (!in_interrupt()) {
-		struct sched_entity *se = &current->se;
-		u64 sample = se->sum_exec_runtime;
-
-		if (se->last_wakeup)
-			sample -= se->last_wakeup;
-		else
-			sample -= se->start_runtime;
-		update_avg(&se->avg_wakeup, sample);
-
-		se->last_wakeup = se->sum_exec_runtime;
-	}
-
 out_running:
 	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, wake_flags);
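The reworked wakeup path above no longer drops the old rq->lock before choosing a cpu: select_task_rq() and set_task_cpu() now run under it, and only then is the lock exchanged for the target rq's — which is what lets the waking checks disappear from task_rq_lock() earlier in this patch. ENQUEUE_WAKING simply records that the class's task_waking() hook already ran. A pthreads model of the new ordering (names beyond the patch are invented):

/* Pthreads model of the reordered wakeup migration: the new cpu is
 * published while the old rq lock is still held, then the locks are
 * exchanged; names beyond the patch are invented. */
#include <pthread.h>

struct rq { pthread_mutex_t lock; };
struct task { struct rq *rq; int state; };

#define TASK_WAKING	2	/* illustrative value */

static void try_to_wake_up_model(struct task *p, struct rq *target)
{
	struct rq *old = p->rq;

	pthread_mutex_lock(&old->lock);		/* rq = task_rq_lock(p) */
	p->state = TASK_WAKING;
	p->rq = target;				/* set_task_cpu() under the old lock */
	pthread_mutex_unlock(&old->lock);	/* __task_rq_unlock(rq) */

	pthread_mutex_lock(&target->lock);	/* raw_spin_lock(&rq->lock) on new rq */
	p->state = 0;				/* activate_task() + TASK_RUNNING */
	pthread_mutex_unlock(&target->lock);
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER };
	struct task p = { &a, 0 };

	try_to_wake_up_model(&p, &b);
	return p.rq == &b ? 0 : 1;
}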
@@ -2527,42 +2499,9 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
-	p->se.last_wakeup = 0;
-	p->se.avg_overlap = 0;
-	p->se.start_runtime = 0;
-	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
-	p->se.wait_start = 0;
-	p->se.wait_max = 0;
-	p->se.wait_count = 0;
-	p->se.wait_sum = 0;
-
-	p->se.sleep_start = 0;
-	p->se.sleep_max = 0;
-	p->se.sum_sleep_runtime = 0;
-
-	p->se.block_start = 0;
-	p->se.block_max = 0;
-	p->se.exec_max = 0;
-	p->se.slice_max = 0;
-
-	p->se.nr_migrations_cold = 0;
-	p->se.nr_failed_migrations_affine = 0;
-	p->se.nr_failed_migrations_running = 0;
-	p->se.nr_failed_migrations_hot = 0;
-	p->se.nr_forced_migrations = 0;
-
-	p->se.nr_wakeups = 0;
-	p->se.nr_wakeups_sync = 0;
-	p->se.nr_wakeups_migrate = 0;
-	p->se.nr_wakeups_local = 0;
-	p->se.nr_wakeups_remote = 0;
-	p->se.nr_wakeups_affine = 0;
-	p->se.nr_wakeups_affine_attempts = 0;
-	p->se.nr_wakeups_passive = 0;
-	p->se.nr_wakeups_idle = 0;
-
+	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
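All the per-entity schedstat counters now live in one struct (se.statistics), so __sched_fork() clears them with a single memset instead of two dozen assignments. A sketch of the pattern with an illustrative subset of fields, not the real struct sched_statistics layout:

/* The pattern behind the one-line replacement: group the counters in
 * one struct so they clear together.  Fields are an illustrative
 * subset, not the real struct sched_statistics layout. */
#include <string.h>

struct sched_statistics_model {
	unsigned long long wait_start, wait_max, wait_count, wait_sum;
	unsigned long long sleep_start, sleep_max, sum_sleep_runtime;
	unsigned long long block_start, block_max, exec_max, slice_max;
	unsigned long long nr_wakeups, nr_wakeups_sync, nr_wakeups_migrate;
};

struct sched_entity_model {
	struct sched_statistics_model statistics;
};

static void sched_fork_model(struct sched_entity_model *se)
{
	/* one memset instead of ~25 per-field assignments */
	memset(&se->statistics, 0, sizeof(se->statistics));
}

int main(void)
{
	struct sched_entity_model se;

	sched_fork_model(&se);
	return (int)se.statistics.nr_wakeups;	/* 0 after the memset */
}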
@@ -2583,11 +2522,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
 	__sched_fork(p);
 	/*
-	 * We mark the process as waking here. This guarantees that
+	 * We mark the process as running here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
 	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	p->state = TASK_WAKING;
+	p->state = TASK_RUNNING;
 
 	/*
 	 * Revert to default priority/policy on fork if requested.
@@ -2654,29 +2593,25 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
+	rq = task_rq_lock(p, &flags);
+	p->state = TASK_WAKING;
+
 	/*
 	 * Fork balancing, do it here and not earlier because:
 	 *  - cpus_allowed can change in the fork path
 	 *  - any previously selected cpu might disappear through hotplug
 	 *
-	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
-	 * ->cpus_allowed is stable, we have preemption disabled, meaning
-	 * cpu_online_mask is stable.
+	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
+	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
-#endif
-
-	/*
-	 * Since the task is not on the rq and we still have TASK_WAKING set
-	 * nobody else will migrate this task.
-	 */
-	rq = cpu_rq(cpu);
-	raw_spin_lock_irqsave(&rq->lock, flags);
 
-	BUG_ON(p->state != TASK_WAKING);
 	p->state = TASK_RUNNING;
-	update_rq_clock(rq);
+	task_rq_unlock(rq, &flags);
+#endif
+
+	rq = task_rq_lock(p, &flags);
 	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -3122,32 +3057,21 @@ void sched_exec(void)
 {
 	struct task_struct *p = current;
 	struct migration_req req;
-	int dest_cpu, this_cpu;
 	unsigned long flags;
 	struct rq *rq;
-
-again:
-	this_cpu = get_cpu();
-	dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
-	if (dest_cpu == this_cpu) {
-		put_cpu();
-		return;
-	}
+	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	put_cpu();
+	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	if (dest_cpu == smp_processor_id())
+		goto unlock;
 
 	/*
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
-	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
-	    || unlikely(!cpu_active(dest_cpu))) {
-		task_rq_unlock(rq, &flags);
-		goto again;
-	}
-
-	/* force the process onto the specified CPU */
-	if (migrate_task(p, dest_cpu, &req)) {
+	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
+	    likely(cpu_active(dest_cpu)) &&
+	    migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
 		struct task_struct *mt = rq->migration_thread;
 
@@ -3159,6 +3083,7 @@ again:
 
 		return;
 	}
+unlock:
 	task_rq_unlock(rq, &flags);
 }
 
@@ -3630,23 +3555,9 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->state == TASK_RUNNING) {
-		u64 runtime = prev->se.sum_exec_runtime;
-
-		runtime -= prev->se.prev_sum_exec_runtime;
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-
-		/*
-		 * In order to avoid avg_overlap growing stale when we are
-		 * indeed overlapping and hence not getting put to sleep, grow
-		 * the avg_overlap on preemption.
-		 *
-		 * We use the average preemption runtime because that
-		 * correlates to the amount of cache footprint a task can
-		 * build up.
-		 */
-		update_avg(&prev->se.avg_overlap, runtime);
-	}
+	if (prev->se.on_rq)
+		update_rq_clock(rq);
+	rq->skip_clock_update = 0;
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
@@ -3709,14 +3620,13 @@ need_resched_nonpreemptible:
 	hrtick_clear(rq);
 
 	raw_spin_lock_irq(&rq->lock);
-	update_rq_clock(rq);
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
 		else
-			deactivate_task(rq, prev, 1);
+			deactivate_task(rq, prev, DEQUEUE_SLEEP);
 		switch_count = &prev->nvcsw;
 	}
 
@@ -4266,7 +4176,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 
 	oldprio = p->prio;
 	prev_class = p->sched_class;
@@ -4287,7 +4196,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		enqueue_task(rq, p, 0, oldprio < prio);
+		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -4309,7 +4218,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
 	rq = task_rq_lock(p, &flags);
-	update_rq_clock(rq);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -4331,7 +4239,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0, false);
+		enqueue_task(rq, p, 0);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -4592,7 +4500,6 @@ recheck:
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
@@ -5358,7 +5265,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	struct rq *rq;
 	int ret = 0;
 
+	/*
+	 * Serialize against TASK_WAKING so that ttwu() and wunt() can
+	 * drop the rq->lock and still rely on ->cpus_allowed.
+	 */
+again:
+	while (task_is_waking(p))
+		cpu_relax();
 	rq = task_rq_lock(p, &flags);
+	if (task_is_waking(p)) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
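This is where the TASK_WAKING serialization removed from task_rq_lock() earlier now lives: spin until the task leaves TASK_WAKING, take the lock, then re-check, because the state can flip between the two steps (ttwu() and wunt() being the author's shorthand for try_to_wake_up() and wake_up_new_task(), the two paths that drop rq->lock while the task is TASK_WAKING). A userspace model of the wait-then-lock-then-recheck pattern, names illustrative:

/* Userspace model of the wait-then-lock-then-recheck pattern added
 * above; names illustrative, the task's rq is fixed in this model. */
#include <pthread.h>
#include <sched.h>

struct rq { pthread_mutex_t lock; };
struct task { struct rq *rq; int state; };

#define TASK_WAKING	2	/* illustrative value */

static int task_is_waking_model(struct task *p)
{
	return p->state == TASK_WAKING;
}

static struct rq *lock_stable_rq(struct task *p)
{
again:
	while (task_is_waking_model(p))
		sched_yield();			/* cpu_relax() stand-in */
	pthread_mutex_lock(&p->rq->lock);
	if (task_is_waking_model(p)) {		/* raced with a wakeup */
		pthread_mutex_unlock(&p->rq->lock);
		goto again;
	}
	return p->rq;				/* locked, not waking */
}

int main(void)
{
	struct rq rq = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { &rq, 0 };

	pthread_mutex_unlock(&lock_stable_rq(&t)->lock);
	return 0;
}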
@@ -5516,30 +5434,29 @@ static int migration_thread(void *data)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
-{
-	int ret;
-
-	local_irq_disable();
-	ret = __migrate_task(p, src_cpu, dest_cpu);
-	local_irq_enable();
-	return ret;
-}
-
 /*
  * Figure out where task on dead CPU should go, use force if necessary.
  */
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
-	int dest_cpu;
+	struct rq *rq = cpu_rq(dead_cpu);
+	int needs_cpu, uninitialized_var(dest_cpu);
+	unsigned long flags;
 
-again:
-	dest_cpu = select_fallback_rq(dead_cpu, p);
+	local_irq_save(flags);
 
-	/* It can have affinity changed while we were choosing. */
-	if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
-		goto again;
+	raw_spin_lock(&rq->lock);
+	needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
+	if (needs_cpu)
+		dest_cpu = select_fallback_rq(dead_cpu, p);
+	raw_spin_unlock(&rq->lock);
+	/*
+	 * It can only fail if we race with set_cpus_allowed(),
+	 * in the racer should migrate the task anyway.
+	 */
+	if (needs_cpu)
+		__migrate_task(p, dead_cpu, dest_cpu);
+	local_irq_restore(flags);
 }
 
 /*
@@ -5603,7 +5520,6 @@ void sched_idle_next(void)
 
 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
-	update_rq_clock(rq);
 	activate_task(rq, p, 0);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5658,7 +5574,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 	for ( ; ; ) {
 		if (!rq->nr_running)
 			break;
-		update_rq_clock(rq);
 		next = pick_next_task(rq);
 		if (!next)
 			break;
@@ -5934,7 +5849,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
@@ -5942,13 +5856,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		raw_spin_lock_irq(&rq->lock);
-		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
 		raw_spin_unlock_irq(&rq->lock);
-		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 		calc_global_load_remove(rq);
@@ -7892,7 +7804,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 {
 	int on_rq;
 
-	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
@@ -7919,9 +7830,9 @@ void normalize_rt_tasks(void)
 
 		p->se.exec_start = 0;
 #ifdef CONFIG_SCHEDSTATS
-		p->se.wait_start = 0;
-		p->se.sleep_start = 0;
-		p->se.block_start = 0;
+		p->se.statistics.wait_start = 0;
+		p->se.statistics.sleep_start = 0;
+		p->se.statistics.block_start = 0;
 #endif
 
 		if (!rt_task(p)) {
@@ -8254,8 +8165,6 @@ void sched_move_task(struct task_struct *tsk)
 
 	rq = task_rq_lock(tsk, &flags);
 
-	update_rq_clock(rq);
-
 	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
@@ -8274,7 +8183,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, tsk, 0, false);
+		enqueue_task(rq, tsk, 0);
 
 	task_rq_unlock(rq, &flags);
 }