about summary refs log tree commit diff stats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	17
1 files changed, 17 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1a48cdbc8631..3c4dec0594d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2081,6 +2081,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2081#endif 2081#endif
2082 2082
2083 /* Here we just switch the register state and the stack. */ 2083 /* Here we just switch the register state and the stack. */
2084 rcu_switch(prev, next);
2084 switch_to(prev, next, prev); 2085 switch_to(prev, next, prev);
2085 2086
2086 barrier(); 2087 barrier();
@@ -3468,6 +3469,21 @@ asmlinkage void __sched schedule(void)
3468} 3469}
3469EXPORT_SYMBOL(schedule); 3470EXPORT_SYMBOL(schedule);
3470 3471
#ifdef CONFIG_RCU_USER_QS
/*
 * Entry point used when rescheduling from the user-mode RCU extended
 * quiescent state.  We can land here without having taken the normal
 * syscall/exception entry paths — e.g. after a stray set_need_resched(),
 * or a remote wakeup whose resched IPI has not arrived yet — so RCU may
 * still believe this CPU is in user (idle-from-RCU's-view) mode.
 * Bracket the context switch with a manual exit/re-entry of RCU user
 * mode until a better mechanism exists.
 */
asmlinkage void __sched schedule_user(void)
{
	rcu_user_exit();
	schedule();
	rcu_user_enter();
}
#endif
3486
3471/** 3487/**
3472 * schedule_preempt_disabled - called with preemption disabled 3488 * schedule_preempt_disabled - called with preemption disabled
3473 * 3489 *
@@ -3569,6 +3585,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
3569 /* Catch callers which need to be fixed */ 3585 /* Catch callers which need to be fixed */
3570 BUG_ON(ti->preempt_count || !irqs_disabled()); 3586 BUG_ON(ti->preempt_count || !irqs_disabled());
3571 3587
3588 rcu_user_exit();
3572 do { 3589 do {
3573 add_preempt_count(PREEMPT_ACTIVE); 3590 add_preempt_count(PREEMPT_ACTIVE);
3574 local_irq_enable(); 3591 local_irq_enable();