Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 71
1 file changed, 27 insertions(+), 44 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c13ddc..89e7283015a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-        u64 ns = 0;
-
-        /*
-         * Must be ->curr _and_ ->on_rq. If dequeued, we would
-         * project cycles that may never be accounted to this
-         * thread, breaking clock_gettime().
-         */
-        if (task_current(rq, p) && task_on_rq_queued(p)) {
-                update_rq_clock(rq);
-                ns = rq_clock_task(rq) - p->se.exec_start;
-                if ((s64)ns < 0)
-                        ns = 0;
-        }
-
-        return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-        unsigned long flags;
-        struct rq *rq;
-        u64 ns = 0;
-
-        rq = task_rq_lock(p, &flags);
-        ns = do_task_delta_exec(p, rq);
-        task_rq_unlock(rq, p, &flags);
-
-        return ns;
-}
-
-/*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
  * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
         unsigned long flags;
         struct rq *rq;
-        u64 ns = 0;
+        u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
         /*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
         rq = task_rq_lock(p, &flags);
-        ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+        /*
+         * Must be ->curr _and_ ->on_rq. If dequeued, we would
+         * project cycles that may never be accounted to this
+         * thread, breaking clock_gettime().
+         */
+        if (task_current(rq, p) && task_on_rq_queued(p)) {
+                update_rq_clock(rq);
+                p->sched_class->update_curr(rq);
+        }
+        ns = p->se.sum_exec_runtime;
         task_rq_unlock(rq, p, &flags);
 
         return ns;
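
With this hunk, task_sched_runtime() no longer adds a side-computed delta on top of p->se.sum_exec_runtime; it asks the task's scheduling class to fold any pending runtime into that counter via ->update_curr() and then reads the single authoritative value. As a rough, user-space illustration of the invariant at stake (not part of the patch; the loop bound and the use of the caller's own thread clock are arbitrary choices), one can check that a thread's CPU-time clock never appears to step backwards:

/*
 * Illustrative user-space check: sample the calling thread's CPU clock
 * repeatedly and verify it never goes backwards. This is the kind of
 * guarantee the "breaking clock_gettime()" comment above refers to.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t thread_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
        uint64_t prev = thread_ns();

        for (long i = 0; i < 1000000; i++) {
                uint64_t now = thread_ns();

                if (now < prev) {
                        printf("non-monotonic: %llu -> %llu\n",
                               (unsigned long long)prev,
                               (unsigned long long)now);
                        return 1;
                }
                prev = now;
        }
        printf("thread CPU clock stayed monotonic\n");
        return 0;
}
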
@@ -2903,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
         * or we have been woken up remotely but the IPI has not yet arrived,
         * we haven't yet exited the RCU idle mode. Do it here manually until
         * we find a better solution.
+        *
+        * NB: There are buggy callers of this function. Ideally we
+        * should warn if prev_state != IN_USER, but that will trigger
+        * too frequently to make sense yet.
         */
-        user_exit();
+        enum ctx_state prev_state = exception_enter();
         schedule();
-        user_enter();
+        exception_exit(prev_state);
 }
 #endif
 
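
Unlike the unconditional user_exit()/user_enter() pair it replaces, exception_enter() records the previous context-tracking state and exception_exit() restores it, so the function stays correct even for the "buggy callers" mentioned in the new comment that may not actually arrive from user context. A deliberately simplified, stand-alone model of that save/restore pattern (hypothetical names, not the kernel's context-tracking code):

#include <stdio.h>

/* Hypothetical two-state context tracker, for illustration only. */
enum ctx_state { CTX_KERNEL, CTX_USER };

static enum ctx_state cur_state = CTX_USER;

/* Analogue of exception_enter(): leave "user" mode, remember where we were. */
static enum ctx_state model_enter(void)
{
        enum ctx_state prev = cur_state;

        cur_state = CTX_KERNEL;
        return prev;
}

/* Analogue of exception_exit(): return to whatever state we came from. */
static void model_exit(enum ctx_state prev)
{
        cur_state = prev;
}

int main(void)
{
        /* A "buggy caller" that is already in kernel context. */
        cur_state = CTX_KERNEL;

        enum ctx_state prev = model_enter();
        /* ... schedule() would run here ... */
        model_exit(prev);

        /*
         * An unconditional user_enter() would now claim user context while
         * still running kernel code; the save/restore pair instead leaves
         * the state exactly as it found it.
         */
        printf("state after exit: %s\n",
               cur_state == CTX_KERNEL ? "kernel" : "user");
        return 0;
}
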
@@ -6368,6 +6343,10 @@ static void sched_init_numa(void)
                if (!sched_debug())
                        break;
        }
+
+        if (!level)
+                return;
+
        /*
         * 'level' contains the number of unique distances, excluding the
         * identity distance node_distance(i,i).
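
The new early return matters when the scan above finds no distance larger than the local node_distance(i,i): 'level' stays 0 (a single-node or otherwise flat machine) and there are no distance-based NUMA domain levels to build. A small user-space sketch of that counting idea, using a hypothetical node_distance() table rather than the kernel's real SLIT data, shows how level ends up 0 on a one-node box:

#include <stdio.h>

#define NR_NODES 1      /* try 1 (flat) vs. 4 to see 'level' change */

/* Hypothetical distance table: 10 locally, 20 to any remote node. */
static int node_distance(int i, int j)
{
        return i == j ? 10 : 20;
}

int main(void)
{
        int level = 0;
        int seen[64];

        for (int i = 0; i < NR_NODES; i++) {
                for (int j = 0; j < NR_NODES; j++) {
                        int d = node_distance(i, j);
                        int dup = 0;

                        if (d <= node_distance(0, 0))
                                continue;       /* skip the local distance */
                        for (int k = 0; k < level; k++)
                                if (seen[k] == d)
                                        dup = 1;
                        if (!dup)
                                seen[level++] = d;
                }
        }

        printf("level = %d%s\n", level,
               level ? "" : " -> bail out like sched_init_numa()");
        return 0;
}
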
@@ -7444,8 +7423,12 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                put_prev_task(rq, tsk);
 
-        tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                                lockdep_is_held(&tsk->sighand->siglock)),
+        /*
+         * All callers are synchronized by task_rq_lock(); we do not use RCU
+         * which is pointless here. Thus, we pass "true" to task_css_check()
+         * to prevent lockdep warnings.
+         */
+        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
