path: root/kernel/sched/core.c
author    Al Viro <viro@zeniv.linux.org.uk>  2014-12-08 20:39:29 -0500
committer Al Viro <viro@zeniv.linux.org.uk>  2014-12-08 20:39:29 -0500
commit    ba00410b8131b23edfb0e09f8b6dd26c8eb621fb (patch)
tree      c08504e4d2fa51ac91cef544f336d0169806c49f /kernel/sched/core.c
parent    8ce74dd6057832618957fc2cbd38fa959c3a0a6c (diff)
parent    aa583096d9767892983332e7c1a984bd17e3cd39 (diff)
Merge branch 'iov_iter' into for-next
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 110
1 file changed, 68 insertions(+), 42 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44999505e1bf..24beb9bb4c3e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-	u64 ns = 0;
-
-	/*
-	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
-	 * project cycles that may never be accounted to this
-	 * thread, breaking clock_gettime().
-	 */
-	if (task_current(rq, p) && task_on_rq_queued(p)) {
-		update_rq_clock(rq);
-		ns = rq_clock_task(rq) - p->se.exec_start;
-		if ((s64)ns < 0)
-			ns = 0;
-	}
-
-	return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns = 0;
-
-	rq = task_rq_lock(p, &flags);
-	ns = do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
  * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
 	unsigned long flags;
 	struct rq *rq;
-	u64 ns = 0;
+	u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 	/*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+	/*
+	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
+	 * project cycles that may never be accounted to this
+	 * thread, breaking clock_gettime().
+	 */
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		update_rq_clock(rq);
+		p->sched_class->update_curr(rq);
+	}
+	ns = p->se.sum_exec_runtime;
 	task_rq_unlock(rq, p, &flags);
 
 	return ns;
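
The comment being moved into task_sched_runtime() mentions clock_gettime(): the per-thread CPU-time clock is the main consumer of this accounting. A minimal userspace sketch that exercises the path (assuming CLOCK_THREAD_CPUTIME_ID is serviced by task_sched_runtime() through the posix-cpu-timers code, which is how the thread clock is typically read):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec t0, t1;
	volatile unsigned long sink = 0;

	/* Per-thread CPU-time clock, not a wall clock. */
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t0);

	for (unsigned long i = 0; i < 100000000UL; i++)
		sink += i;	/* burn some CPU so the reading moves */

	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t1);

	printf("thread consumed %.6f s of CPU (sink=%lu)\n",
	       (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9,
	       sink);
	return 0;
}

With the hunk above, the pending runtime of a currently running thread is folded into p->se.sum_exec_runtime by its scheduling class (p->sched_class->update_curr(rq)) before the value is read, rather than being estimated separately from rq_clock_task() - p->se.exec_start as the removed do_task_delta_exec() did.
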
@@ -2951,6 +2922,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+{
+	enum ctx_state prev_ctx;
+
+	if (likely(!preemptible()))
+		return;
+
+	do {
+		__preempt_count_add(PREEMPT_ACTIVE);
+		/*
+		 * Needs preempt disabled in case user_exit() is traced
+		 * and the tracer calls preempt_enable_notrace() causing
+		 * an infinite recursion.
+		 */
+		prev_ctx = exception_enter();
+		__schedule();
+		exception_exit(prev_ctx);
+
+		__preempt_count_sub(PREEMPT_ACTIVE);
+		barrier();
+	} while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #endif /* CONFIG_PREEMPT */
 
 /*
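
The kerneldoc above says preempt_enable_notrace() is meant to call preempt_schedule_context() instead of preempt_schedule() when context tracking is enabled. As a rough sketch of that dispatch (illustrative only; the macro name and exact shape are assumed here, not taken from <linux/preempt.h>):

/* Hypothetical helper -- not the real preempt_enable_notrace(). */
#ifdef CONFIG_CONTEXT_TRACKING
/* Context tracking enabled: leave user context before scheduling. */
#define sketch_preempt_enable_notrace()				\
do {								\
	barrier();						\
	if (unlikely(__preempt_count_dec_and_test()))		\
		preempt_schedule_context();			\
} while (0)
#else
/* No context tracking: the plain preemption point is enough. */
#define sketch_preempt_enable_notrace()				\
do {								\
	barrier();						\
	if (unlikely(__preempt_count_dec_and_test()))		\
		preempt_schedule();				\
} while (0)
#endif
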
@@ -6327,6 +6339,10 @@ static void sched_init_numa(void)
 		if (!sched_debug())
 			break;
 	}
+
+	if (!level)
+		return;
+
 	/*
 	 * 'level' contains the number of unique distances, excluding the
 	 * identity distance node_distance(i,i).
@@ -7403,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
-	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-				lockdep_is_held(&tsk->sighand->siglock)),
+	/*
+	 * All callers are synchronized by task_rq_lock(); we do not use RCU
+	 * which is pointless here. Thus, we pass "true" to task_css_check()
+	 * to prevent lockdep warnings.
+	 */
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
 	tsk->sched_task_group = tg;
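
For background on why a literal true is enough here: task_css_check()'s last argument ends up as the extra condition of the RCU lockdep checks, which accept either an active rcu_read_lock() section or a caller-supplied condition. A generic sketch of that pattern (not the cgroup code; the structures below are made up for illustration):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_owner {
	struct demo_state __rcu *cur;
	spinlock_t lock;
};

/*
 * rcu_dereference_check() only triggers a lockdep splat when neither an
 * RCU read-side section is active nor the supplied condition is true.
 * sched_move_task() passes a constant true because every caller is
 * already serialized by task_rq_lock(), as the new comment above notes,
 * so RCU protection would be redundant.
 */
static struct demo_state *demo_state_locked(struct demo_owner *o)
{
	return rcu_dereference_check(o->cur, lockdep_is_held(&o->lock));
}
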
@@ -7833,6 +7853,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 	sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+	sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 				 struct cgroup_taskset *tset)
 {
@@ -8205,6 +8230,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_free	= cpu_cgroup_css_free,
 	.css_online	= cpu_cgroup_css_online,
 	.css_offline	= cpu_cgroup_css_offline,
+	.fork		= cpu_cgroup_fork,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
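
The new .fork callback simply runs the freshly forked task through sched_move_task(), so its scheduler-side task_group matches the cgroup it is created in. A small userspace sketch that exercises this path (cgroup v1 layout assumed; the /sys/fs/cgroup/cpu/demo group is hypothetical and must be created beforehand):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	/* Hypothetical group -- mkdir /sys/fs/cgroup/cpu/demo first. */
	FILE *f = fopen("/sys/fs/cgroup/cpu/demo/tasks", "w");

	if (!f) {
		perror("open tasks file");
		return 1;
	}
	fprintf(f, "%d\n", getpid());	/* move this process into the group */
	fclose(f);

	if (fork() == 0) {
		/* The child starts life inside the same cgroup; the .fork
		 * hook above is what calls sched_move_task() for it. */
		execlp("cat", "cat", "/proc/self/cgroup", (char *)NULL);
		_exit(127);
	}
	wait(NULL);
	return 0;
}
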