Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 63 +++++++++++++++++++++------------------------------------------
 1 file changed, 21 insertions(+), 42 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c13ddc..24beb9bb4c3e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-        u64 ns = 0;
-
-        /*
-         * Must be ->curr _and_ ->on_rq. If dequeued, we would
-         * project cycles that may never be accounted to this
-         * thread, breaking clock_gettime().
-         */
-        if (task_current(rq, p) && task_on_rq_queued(p)) {
-                update_rq_clock(rq);
-                ns = rq_clock_task(rq) - p->se.exec_start;
-                if ((s64)ns < 0)
-                        ns = 0;
-        }
-
-        return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-        unsigned long flags;
-        struct rq *rq;
-        u64 ns = 0;
-
-        rq = task_rq_lock(p, &flags);
-        ns = do_task_delta_exec(p, rq);
-        task_rq_unlock(rq, p, &flags);
-
-        return ns;
-}
-
-/*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
  * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
         unsigned long flags;
         struct rq *rq;
-        u64 ns = 0;
+        u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
         /*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
         rq = task_rq_lock(p, &flags);
-        ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+        /*
+         * Must be ->curr _and_ ->on_rq. If dequeued, we would
+         * project cycles that may never be accounted to this
+         * thread, breaking clock_gettime().
+         */
+        if (task_current(rq, p) && task_on_rq_queued(p)) {
+                update_rq_clock(rq);
+                p->sched_class->update_curr(rq);
+        }
+        ns = p->se.sum_exec_runtime;
         task_rq_unlock(rq, p, &flags);
 
         return ns;
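
Note on the hunk above: the removed do_task_delta_exec() computed the not-yet-accounted delta by hand from rq_clock_task(), while the replacement asks the task's scheduling class to fold any pending runtime into p->se.sum_exec_runtime before the value is read. A minimal sketch of what such an update_curr() callback does, modeled on the fair-class accounting (simplified names, not the exact kernel code):

/*
 * Simplified sketch of a sched_class update_curr() callback: charge
 * the time elapsed since the last update to the running task, so that
 * sum_exec_runtime is exact at the moment task_sched_runtime() reads it.
 */
static void update_curr_sketch(struct rq *rq)
{
        struct sched_entity *curr = &rq->curr->se;
        u64 now = rq_clock_task(rq);    /* caller already ran update_rq_clock() */
        s64 delta_exec = now - curr->exec_start;

        if (delta_exec <= 0)
                return;                 /* clock stale or no time has passed */

        curr->exec_start = now;         /* open a new accounting window */
        curr->sum_exec_runtime += delta_exec;
}

Because ns is now assigned unconditionally from sum_exec_runtime, the "u64 ns = 0" initializer in the earlier hunk becomes dead, which is why it shrinks to "u64 ns;".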
@@ -6368,6 +6339,10 @@ static void sched_init_numa(void)
                 if (!sched_debug())
                         break;
         }
+
+        if (!level)
+                return;
+
         /*
          * 'level' contains the number of unique distances, excluding the
          * identity distance node_distance(i,i).
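
The new early return covers topologies where the scan above finds no distance beyond the identity, leaving level at 0; proceeding would size the per-level NUMA tables from zero. A toy userspace illustration of the guard (hypothetical distance values, not kernel code):

#include <stdio.h>

#define LOCAL_DISTANCE 10       /* identity distance node_distance(i,i) */

int main(void)
{
        /* hypothetical distance samples: every node looks local */
        int distances[] = { 10, 10, 10 };
        int level = 0;

        for (int i = 0; i < 3; i++)
                if (distances[i] > LOCAL_DISTANCE)
                        level++;        /* found a non-local distance */

        if (!level)                     /* mirrors the added guard */
                return 0;               /* nothing NUMA-specific to build */

        printf("building %d NUMA levels\n", level);
        return 0;
}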
@@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
         if (unlikely(running))
                 put_prev_task(rq, tsk);
 
-        tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                                lockdep_is_held(&tsk->sighand->siglock)),
+        /*
+         * All callers are synchronized by task_rq_lock(); we do not use RCU
+         * which is pointless here. Thus, we pass "true" to task_css_check()
+         * to prevent lockdep warnings.
+         */
+        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                           struct task_group, css);
         tg = autogroup_task_group(tsk, tg);
         tsk->sched_task_group = tg;