diff options
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- | kernel/sched/core.c | 47 |
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44999505e1bf..240157c13ddc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2951,6 +2951,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
2951 | } | 2951 | } |
2952 | NOKPROBE_SYMBOL(preempt_schedule); | 2952 | NOKPROBE_SYMBOL(preempt_schedule); |
2953 | EXPORT_SYMBOL(preempt_schedule); | 2953 | EXPORT_SYMBOL(preempt_schedule); |
2954 | |||
#ifdef CONFIG_CONTEXT_TRACKING
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage __visible void __sched notrace preempt_schedule_context(void)
{
	enum ctx_state prev_ctx;

	/* Bail out if preemption is currently disabled or not needed. */
	if (likely(!preemptible()))
		return;

	do {
		__preempt_count_add(PREEMPT_ACTIVE);
		/*
		 * Needs preempt disabled in case user_exit() is traced
		 * and the tracer calls preempt_enable_notrace() causing
		 * an infinite recursion.
		 *
		 * exception_enter() leaves user context (if we were in
		 * it) and returns the previous state so it can be
		 * restored symmetrically after __schedule().
		 */
		prev_ctx = exception_enter();
		__schedule();
		exception_exit(prev_ctx);

		__preempt_count_sub(PREEMPT_ACTIVE);
		/*
		 * Compiler barrier so the need_resched() test below
		 * re-reads the flag rather than using a cached value.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_CONTEXT_TRACKING */
2994 | |||
2954 | #endif /* CONFIG_PREEMPT */ | 2995 | #endif /* CONFIG_PREEMPT */ |
2955 | 2996 | ||
2956 | /* | 2997 | /* |
@@ -7833,6 +7874,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
7833 | sched_offline_group(tg); | 7874 | sched_offline_group(tg); |
7834 | } | 7875 | } |
7835 | 7876 | ||
/*
 * cgroup ->fork() callback for the cpu controller (wired up as
 * .fork in cpu_cgrp_subsys below): place the newly forked @task
 * into its task group via sched_move_task().
 */
static void cpu_cgroup_fork(struct task_struct *task)
{
	sched_move_task(task);
}
7881 | |||
7836 | static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, | 7882 | static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, |
7837 | struct cgroup_taskset *tset) | 7883 | struct cgroup_taskset *tset) |
7838 | { | 7884 | { |
@@ -8205,6 +8251,7 @@ struct cgroup_subsys cpu_cgrp_subsys = { | |||
8205 | .css_free = cpu_cgroup_css_free, | 8251 | .css_free = cpu_cgroup_css_free, |
8206 | .css_online = cpu_cgroup_css_online, | 8252 | .css_online = cpu_cgroup_css_online, |
8207 | .css_offline = cpu_cgroup_css_offline, | 8253 | .css_offline = cpu_cgroup_css_offline, |
8254 | .fork = cpu_cgroup_fork, | ||
8208 | .can_attach = cpu_cgroup_can_attach, | 8255 | .can_attach = cpu_cgroup_can_attach, |
8209 | .attach = cpu_cgroup_attach, | 8256 | .attach = cpu_cgroup_attach, |
8210 | .exit = cpu_cgroup_exit, | 8257 | .exit = cpu_cgroup_exit, |